From 58e1010da3c15e7bdf426b0a3d4b13dba1b7d055 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 26 Nov 2007 21:21:49 +0100 Subject: sched: fix RLIMIT_CPU comment Devan Lippman noticed that the RLIMIT_CPU comment in resource.h is incorrect: the field is in seconds, not msecs. We used msecs in earlier versions of the patch but that got changed. Found-by: Devan Lippman Signed-off-by: Ingo Molnar --- include/asm-generic/resource.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/asm-generic') diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h index cfe3692b23e..a4a22cc3589 100644 --- a/include/asm-generic/resource.h +++ b/include/asm-generic/resource.h @@ -12,7 +12,7 @@ * then it defines them prior including asm-generic/resource.h. ) */ -#define RLIMIT_CPU 0 /* CPU time in ms */ +#define RLIMIT_CPU 0 /* CPU time in sec */ #define RLIMIT_FSIZE 1 /* Maximum filesize */ #define RLIMIT_DATA 2 /* max data size */ #define RLIMIT_STACK 3 /* max stack size */ -- cgit v1.2.3-70-g09d2 From 421d99193537a6522aac2148286f08792167d5fd Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Mon, 17 Dec 2007 16:20:27 -0800 Subject: quicklist: Set tlb->need_flush if pages are remaining in quicklist 0 This ensures that the quicklists are drained. Otherwise, draining may only occur when the processor reaches an idle state. Fixes a fatal leak of pgd_t's on 2.6.22 and later. Signed-off-by: Christoph Lameter Reported-by: Dhaval Giani Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-generic/tlb.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/asm-generic') diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index f490e43a90b..799307eea40 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -14,6 +14,7 @@ #define _ASM_GENERIC__TLB_H #include +#include <linux/quicklist.h> #include #include @@ -85,6 +86,9 @@ tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) static inline void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) { +#ifdef CONFIG_QUICKLIST + tlb->need_flush += &__get_cpu_var(quicklist)[0].nr_pages != 0; +#endif tlb_flush_mmu(tlb, start, end); /* keep the page table cache within bounds */ -- cgit v1.2.3-70-g09d2 From 49eaaa1a6c950e7a92c4386c199b8ec950f840b9 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 26 Dec 2007 12:43:01 -0800 Subject: Revert quicklist need->flush fix This did not fix the reported issue. Apart from other weirdness, it creates a bad link between the TLB flushing logic and the quicklists. If an arch really does need a TLB flush before a free, then the arch code needs to set tlb->need_flush before calling quicklist_free.
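As a rough illustration of the suggested arch-side approach (a hypothetical sketch, not part of this revert; the hook name and flush policy are made up for illustration), such an arch would do something like:

	static inline void my_pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
	{
		/* request the TLB flush _before_ the page-table page
		 * can be recycled through the quicklists */
		tlb->need_flush = 1;
		quicklist_free_page(0, NULL, pte);
	}
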
Signed-off-by: Christoph Lameter Signed-off-by: Linus Torvalds --- include/asm-generic/tlb.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'include/asm-generic') diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 799307eea40..75f2bfab614 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -86,9 +86,6 @@ tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) static inline void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) { -#ifdef CONFIG_QUICKLIST - tlb->need_flush += &__get_cpu_var(quicklist)[0].nr_pages != 0; -#endif tlb_flush_mmu(tlb, start, end); /* keep the page table cache within bounds */ -- cgit v1.2.3-70-g09d2 From 78f2c7db6068fd6ef75b8c120f04a388848eacb5 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 25 Jan 2008 21:08:27 +0100 Subject: sched: SCHED_FIFO/SCHED_RR watchdog timer Introduce a new rlimit that allows the user to set a runtime timeout on a real-time task's timeslice. The limit measures runtime since the last sleep; once it is exceeded, the task receives SIGXCPU. Input and ideas by Thomas Gleixner and Lennart Poettering. Signed-off-by: Peter Zijlstra CC: Lennart Poettering CC: Michael Kerrisk CC: Ulrich Drepper Signed-off-by: Ingo Molnar --- include/asm-generic/resource.h | 5 +++-- include/linux/sched.h | 1 + kernel/posix-cpu-timers.c | 29 +++++++++++++++++++++++++++++ kernel/sched_rt.c | 30 ++++++++++++++++++++++++++++++ 4 files changed, 63 insertions(+), 2 deletions(-) (limited to 'include/asm-generic') diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h index a4a22cc3589..587566f95f6 100644 --- a/include/asm-generic/resource.h +++ b/include/asm-generic/resource.h @@ -44,8 +44,8 @@ #define RLIMIT_NICE 13 /* max nice prio allowed to raise to 0-39 for nice level 19 .. -20 */ #define RLIMIT_RTPRIO 14 /* maximum realtime priority */ - -#define RLIM_NLIMITS 15 +#define RLIMIT_RTTIME 15 /* timeout for RT tasks in us */ +#define RLIM_NLIMITS 16 /* * SuS says limits have to be unsigned. @@ -86,6 +86,7 @@ [RLIMIT_MSGQUEUE] = { MQ_BYTES_MAX, MQ_BYTES_MAX }, \ [RLIMIT_NICE] = { 0, 0 }, \ [RLIMIT_RTPRIO] = { 0, 0 }, \ + [RLIMIT_RTTIME] = { RLIM_INFINITY, RLIM_INFINITY }, \ } #endif /* __KERNEL__ */ diff --git a/include/linux/sched.h b/include/linux/sched.h index a06d09ebd5c..fe3f8fbc614 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -932,6 +932,7 @@ struct sched_entity { struct sched_rt_entity { struct list_head run_list; unsigned int time_slice; + unsigned long timeout; }; struct task_struct { diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 68c96376e84..2c076b36c4f 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -967,6 +967,7 @@ static void check_thread_timers(struct task_struct *tsk, { int maxfire; struct list_head *timers = tsk->cpu_timers; + struct signal_struct *const sig = tsk->signal; maxfire = 20; tsk->it_prof_expires = cputime_zero; @@ -1011,6 +1012,34 @@ static void check_thread_timers(struct task_struct *tsk, t->firing = 1; list_move_tail(&t->entry, firing); } + + /* + * Check for the special case thread timers. + */ + if (sig->rlim[RLIMIT_RTTIME].rlim_cur != RLIM_INFINITY) { + unsigned long hard = sig->rlim[RLIMIT_RTTIME].rlim_max; + unsigned long *soft = &sig->rlim[RLIMIT_RTTIME].rlim_cur; + + if (tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) { + /* + * At the hard limit, we just die. + * No need to calculate anything else now.
+ */ + __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk); + return; + } + if (tsk->rt.timeout > DIV_ROUND_UP(*soft, USEC_PER_SEC/HZ)) { + /* + * At the soft limit, send a SIGXCPU every second. + */ + if (sig->rlim[RLIMIT_RTTIME].rlim_cur + < sig->rlim[RLIMIT_RTTIME].rlim_max) { + sig->rlim[RLIMIT_RTTIME].rlim_cur += + USEC_PER_SEC; + } + __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk); + } + } } /* diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 29963af782a..f350f7b1515 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -116,6 +116,9 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup) inc_cpu_load(rq, p->se.load.weight); inc_rt_tasks(p, rq); + + if (wakeup) + p->rt.timeout = 0; } /* @@ -834,11 +837,38 @@ static void prio_changed_rt(struct rq *rq, struct task_struct *p, } } +static void watchdog(struct rq *rq, struct task_struct *p) +{ + unsigned long soft, hard; + + if (!p->signal) + return; + + soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur; + hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max; + + if (soft != RLIM_INFINITY) { + unsigned long next; + + p->rt.timeout++; + next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ); + if (next > p->rt.timeout) { + u64 next_time = p->se.sum_exec_runtime; + + next_time += next * (NSEC_PER_SEC/HZ); + if (p->it_sched_expires > next_time) + p->it_sched_expires = next_time; + } else + p->it_sched_expires = p->se.sum_exec_runtime; + } +} static void task_tick_rt(struct rq *rq, struct task_struct *p) { update_curr_rt(rq); + watchdog(rq, p); + /* * RR tasks need a special form of timeslice management. * FIFO tasks have no timeslices. -- cgit v1.2.3-70-g09d2 From 01ba2bdc6b639764745ff678caf3fb9e5bcd745a Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 20 Jan 2008 14:15:03 +0100 Subject: all archs: consolidate init and exit sections in vmlinux.lds.h This patch consolidates all definitions of the .init.text, .init.data, .exit.text and .exit.data sections in the generic vmlinux.lds.h. This is a preparatory patch; on its own it does not buy us much.
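The per-arch conversion is mechanical; a converted script fragment ends up looking roughly like this (an illustrative composite, not any single arch's script; compare the alpha and arm hunks below):

	.init.text : {
		_sinittext = .;
		INIT_TEXT		/* was: *(.init.text) */
		_einittext = .;
	}
	.init.data : { INIT_DATA }	/* was: *(.init.data) */
	/DISCARD/ : {
		EXIT_TEXT		/* was: *(.exit.text) */
		EXIT_DATA		/* was: *(.exit.data) */
		*(.exitcall.exit)
	}
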
Signed-off-by: Sam Ravnborg --- arch/alpha/kernel/vmlinux.lds.S | 8 ++++---- arch/arm/kernel/vmlinux.lds.S | 10 +++++----- arch/avr32/kernel/vmlinux.lds.S | 8 ++++---- arch/blackfin/kernel/vmlinux.lds.S | 8 ++++---- arch/cris/arch-v10/vmlinux.lds.S | 8 ++++---- arch/cris/arch-v32/vmlinux.lds.S | 8 ++++---- arch/frv/kernel/vmlinux.lds.S | 14 +++++++------- arch/h8300/kernel/vmlinux.lds.S | 8 ++++---- arch/ia64/kernel/vmlinux.lds.S | 8 ++++---- arch/m32r/kernel/vmlinux.lds.S | 12 ++++++------ arch/m68k/kernel/vmlinux-std.lds | 8 ++++---- arch/m68k/kernel/vmlinux-sun3.lds | 8 ++++---- arch/m68knommu/kernel/vmlinux.lds.S | 8 ++++---- arch/mips/kernel/vmlinux.lds.S | 8 ++++---- arch/parisc/kernel/vmlinux.lds.S | 8 ++++---- arch/powerpc/kernel/vmlinux.lds.S | 10 ++++++---- arch/ppc/kernel/vmlinux.lds.S | 8 ++++---- arch/s390/kernel/vmlinux.lds.S | 8 ++++---- arch/sh/kernel/vmlinux_32.lds.S | 8 ++++---- arch/sh/kernel/vmlinux_64.lds.S | 8 ++++---- arch/sparc/kernel/vmlinux.lds.S | 8 ++++---- arch/sparc64/kernel/vmlinux.lds.S | 8 ++++---- arch/um/kernel/dyn.lds.S | 4 ++-- arch/um/kernel/uml.lds.S | 4 ++-- arch/v850/kernel/vmlinux.lds.S | 10 +++++----- arch/x86/kernel/vmlinux_32.lds.S | 14 ++++++++++---- arch/x86/kernel/vmlinux_64.lds.S | 19 +++++++++++++------ arch/xtensa/kernel/vmlinux.lds.S | 9 +++++---- include/asm-generic/vmlinux.lds.h | 7 +++++++ 29 files changed, 140 insertions(+), 117 deletions(-) (limited to 'include/asm-generic') diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S index 55c05b511f4..f13249be17c 100644 --- a/arch/alpha/kernel/vmlinux.lds.S +++ b/arch/alpha/kernel/vmlinux.lds.S @@ -46,11 +46,11 @@ SECTIONS __init_begin = .; .init.text : { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } .init.data : { - *(.init.data) + INIT_DATA } . = ALIGN(16); @@ -136,8 +136,8 @@ SECTIONS /* Sections to be discarded */ /DISCARD/ : { - *(.exit.text) - *(.exit.data) + EXIT_TEXT + EXIT_DATA *(.exitcall.exit) } diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 30f732c7fdb..4898bdcfe7d 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -30,7 +30,7 @@ SECTIONS } .init : { /* Init code and data */ - *(.init.text) + INIT_TEXT _einittext = .; __proc_info_begin = .; *(.proc.info.init) @@ -70,15 +70,15 @@ SECTIONS __per_cpu_end = .; #ifndef CONFIG_XIP_KERNEL __init_begin = _stext; - *(.init.data) + INIT_DATA . = ALIGN(4096); __init_end = .; #endif } /DISCARD/ : { /* Exit code and data */ - *(.exit.text) - *(.exit.data) + EXIT_TEXT + EXIT_DATA *(.exitcall.exit) #ifndef CONFIG_MMU *(.fixup) @@ -130,7 +130,7 @@ SECTIONS #ifdef CONFIG_XIP_KERNEL . = ALIGN(4096); __init_begin = .; - *(.init.data) + INIT_DATA . = ALIGN(4096); __init_end = .; #endif diff --git a/arch/avr32/kernel/vmlinux.lds.S b/arch/avr32/kernel/vmlinux.lds.S index 11f08e35a2e..481cfd40c05 100644 --- a/arch/avr32/kernel/vmlinux.lds.S +++ b/arch/avr32/kernel/vmlinux.lds.S @@ -27,19 +27,19 @@ SECTIONS __init_begin = .; _sinittext = .; *(.text.reset) - *(.init.text) + INIT_TEXT /* * .exit.text is discarded at runtime, not * link time, to deal with references from * __bug_table */ - *(.exit.text) + EXIT_TEXT _einittext = .; . = ALIGN(4); __tagtable_begin = .; *(.taglist.init) __tagtable_end = .; - *(.init.data) + INIT_DATA . = ALIGN(16); __setup_start = .; *(.init.setup) @@ -135,7 +135,7 @@ SECTIONS * thrown away, as cleanup code is never called unless it's a module. 
*/ /DISCARD/ : { - *(.exit.data) + EXIT_DATA *(.exitcall.exit) } diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S index 9b75bc83c71..858722421b4 100644 --- a/arch/blackfin/kernel/vmlinux.lds.S +++ b/arch/blackfin/kernel/vmlinux.lds.S @@ -91,13 +91,13 @@ SECTIONS { . = ALIGN(PAGE_SIZE); __sinittext = .; - *(.init.text) + INIT_TEXT __einittext = .; } .init.data : { . = ALIGN(16); - *(.init.data) + INIT_DATA } .init.setup : { @@ -198,8 +198,8 @@ SECTIONS /DISCARD/ : { - *(.exit.text) - *(.exit.data) + EXIT_TEXT + EXIT_DATA *(.exitcall.exit) } } diff --git a/arch/cris/arch-v10/vmlinux.lds.S b/arch/cris/arch-v10/vmlinux.lds.S index 97a7876ed68..93c9f0ea286 100644 --- a/arch/cris/arch-v10/vmlinux.lds.S +++ b/arch/cris/arch-v10/vmlinux.lds.S @@ -57,10 +57,10 @@ SECTIONS __init_begin = .; .init.text : { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } - .init.data : { *(.init.data) } + .init.data : { INIT_DATA } . = ALIGN(16); __setup_start = .; .init.setup : { *(.init.setup) } @@ -109,8 +109,8 @@ SECTIONS /* Sections to be discarded */ /DISCARD/ : { - *(.text.exit) - *(.data.exit) + EXIT_TEXT + EXIT_DATA *(.exitcall.exit) } diff --git a/arch/cris/arch-v32/vmlinux.lds.S b/arch/cris/arch-v32/vmlinux.lds.S index b076c134c0b..fead8c59ea6 100644 --- a/arch/cris/arch-v32/vmlinux.lds.S +++ b/arch/cris/arch-v32/vmlinux.lds.S @@ -61,10 +61,10 @@ SECTIONS __init_begin = .; .init.text : { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } - .init.data : { *(.init.data) } + .init.data : { INIT_DATA } . = ALIGN(16); __setup_start = .; .init.setup : { *(.init.setup) } @@ -124,8 +124,8 @@ SECTIONS /* Sections to be discarded */ /DISCARD/ : { - *(.text.exit) - *(.data.exit) + EXIT_TEXT + EXIT_DATA *(.exitcall.exit) } diff --git a/arch/frv/kernel/vmlinux.lds.S b/arch/frv/kernel/vmlinux.lds.S index a17a81d58bf..f42b328b1dd 100644 --- a/arch/frv/kernel/vmlinux.lds.S +++ b/arch/frv/kernel/vmlinux.lds.S @@ -28,14 +28,14 @@ SECTIONS .init.text : { *(.text.head) #ifndef CONFIG_DEBUG_INFO - *(.init.text) - *(.exit.text) - *(.exit.data) + INIT_TEXT + EXIT_TEXT + EXIT_DATA *(.exitcall.exit) #endif } _einittext = .; - .init.data : { *(.init.data) } + .init.data : { INIT_DATA } . = ALIGN(8); __setup_start = .; @@ -106,8 +106,8 @@ SECTIONS LOCK_TEXT #ifdef CONFIG_DEBUG_INFO *( - .init.text - .exit.text + INIT_TEXT + EXIT_TEXT .exitcall.exit ) #endif @@ -138,7 +138,7 @@ SECTIONS .data : { /* Data */ DATA_DATA *(.data.*) - *(.exit.data) + EXIT_DATA CONSTRUCTORS } diff --git a/arch/h8300/kernel/vmlinux.lds.S b/arch/h8300/kernel/vmlinux.lds.S index a2e72d49555..43a87b9085b 100644 --- a/arch/h8300/kernel/vmlinux.lds.S +++ b/arch/h8300/kernel/vmlinux.lds.S @@ -110,9 +110,9 @@ SECTIONS . = ALIGN(0x4) ; ___init_begin = .; __sinittext = .; - *(.init.text) + INIT_TEXT __einittext = .; - *(.init.data) + INIT_DATA . = ALIGN(0x4) ; ___setup_start = .; *(.init.setup) @@ -124,8 +124,8 @@ SECTIONS ___con_initcall_start = .; *(.con_initcall.init) ___con_initcall_end = .; - *(.exit.text) - *(.exit.data) + EXIT_TEXT + EXIT_DATA #if defined(CONFIG_BLK_DEV_INITRD) . 
= ALIGN(4); ___initramfs_start = .; diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index 757e419ebcf..80622acc95d 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -27,8 +27,8 @@ SECTIONS { /* Sections to be discarded */ /DISCARD/ : { - *(.exit.text) - *(.exit.data) + EXIT_TEXT + EXIT_DATA *(.exitcall.exit) *(.IA_64.unwind.exit.text) *(.IA_64.unwind_info.exit.text) @@ -119,12 +119,12 @@ SECTIONS .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) - { *(.init.data) } + { INIT_DATA } #ifdef CONFIG_BLK_DEV_INITRD .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S index 942a8c7a441..41b07854fcc 100644 --- a/arch/m32r/kernel/vmlinux.lds.S +++ b/arch/m32r/kernel/vmlinux.lds.S @@ -76,10 +76,10 @@ SECTIONS __init_begin = .; .init.text : { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } - .init.data : { *(.init.data) } + .init.data : { INIT_DATA } . = ALIGN(16); __setup_start = .; .init.setup : { *(.init.setup) } @@ -100,8 +100,8 @@ SECTIONS .altinstr_replacement : { *(.altinstr_replacement) } /* .exit.text is discard at runtime, not link time, to deal with references from .altinstructions and .eh_frame */ - .exit.text : { *(.exit.text) } - .exit.data : { *(.exit.data) } + .exit.text : { EXIT_TEXT } + .exit.data : { EXIT_DATA } #ifdef CONFIG_BLK_DEV_INITRD . = ALIGN(4096); @@ -124,8 +124,8 @@ SECTIONS /* Sections to be discarded */ /DISCARD/ : { - *(.exit.text) - *(.exit.data) + EXIT_TEXT + EXIT_DATA *(.exitcall.exit) } diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds index 59fe285865e..7537cc5e615 100644 --- a/arch/m68k/kernel/vmlinux-std.lds +++ b/arch/m68k/kernel/vmlinux-std.lds @@ -45,10 +45,10 @@ SECTIONS __init_begin = .; .init.text : { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } - .init.data : { *(.init.data) } + .init.data : { INIT_DATA } . = ALIGN(16); __setup_start = .; .init.setup : { *(.init.setup) } @@ -82,8 +82,8 @@ SECTIONS /* Sections to be discarded */ /DISCARD/ : { - *(.exit.text) - *(.exit.data) + EXIT_TEXT + EXIT_DATA *(.exitcall.exit) } diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds index 4adffefb5c4..cdc313e7c29 100644 --- a/arch/m68k/kernel/vmlinux-sun3.lds +++ b/arch/m68k/kernel/vmlinux-sun3.lds @@ -38,10 +38,10 @@ SECTIONS __init_begin = .; .init.text : { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } - .init.data : { *(.init.data) } + .init.data : { INIT_DATA } . = ALIGN(16); __setup_start = .; .init.setup : { *(.init.setup) } @@ -77,8 +77,8 @@ __init_begin = .; /* Sections to be discarded */ /DISCARD/ : { - *(.exit.text) - *(.exit.data) + EXIT_TEXT + EXIT_DATA *(.exitcall.exit) } diff --git a/arch/m68knommu/kernel/vmlinux.lds.S b/arch/m68knommu/kernel/vmlinux.lds.S index 07a0055602f..b44edb08e21 100644 --- a/arch/m68knommu/kernel/vmlinux.lds.S +++ b/arch/m68knommu/kernel/vmlinux.lds.S @@ -143,9 +143,9 @@ SECTIONS { . = ALIGN(4096); __init_begin = .; _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; - *(.init.data) + INIT_DATA . 
= ALIGN(16); __setup_start = .; *(.init.setup) @@ -170,8 +170,8 @@ SECTIONS { } > INIT /DISCARD/ : { - *(.exit.text) - *(.exit.data) + EXIT_TEXT + EXIT_DATA *(.exitcall.exit) } diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 5fc2398bdb7..b5470ceb418 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -114,11 +114,11 @@ SECTIONS __init_begin = .; .init.text : { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } .init.data : { - *(.init.data) + INIT_DATA } . = ALIGN(16); .init.setup : { @@ -144,10 +144,10 @@ SECTIONS * references from .rodata */ .exit.text : { - *(.exit.text) + EXIT_TEXT } .exit.data : { - *(.exit.data) + EXIT_DATA } #if defined(CONFIG_BLK_DEV_INITRD) . = ALIGN(_PAGE_SIZE); diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index 40d0ff9b81a..50b4a3a25d0 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -172,11 +172,11 @@ SECTIONS __init_begin = .; .init.text : { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } .init.data : { - *(.init.data) + INIT_DATA } . = ALIGN(16); .init.setup : { @@ -215,10 +215,10 @@ SECTIONS * from .altinstructions and .eh_frame */ .exit.text : { - *(.exit.text) + EXIT_TEXT } .exit.data : { - *(.exit.data) + EXIT_DATA } #ifdef CONFIG_BLK_DEV_INITRD . = ALIGN(PAGE_SIZE); diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index f66fa5d966b..0afb9e31d2a 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -23,7 +23,7 @@ SECTIONS /* Sections to be discarded. */ /DISCARD/ : { *(.exitcall.exit) - *(.exit.data) + EXIT_DATA } . = KERNELBASE; @@ -76,17 +76,19 @@ SECTIONS .init.text : { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } /* .exit.text is discarded at runtime, not link time, * to deal with references from __bug_table */ - .exit.text : { *(.exit.text) } + .exit.text : { + EXIT_TEXT + } .init.data : { - *(.init.data); + INIT_DATA __vtop_table_begin = .; *(.vtop_fixup); __vtop_table_end = .; diff --git a/arch/ppc/kernel/vmlinux.lds.S b/arch/ppc/kernel/vmlinux.lds.S index 98c1212674f..52b64fcbdfc 100644 --- a/arch/ppc/kernel/vmlinux.lds.S +++ b/arch/ppc/kernel/vmlinux.lds.S @@ -97,14 +97,14 @@ SECTIONS __init_begin = .; .init.text : { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } /* .exit.text is discarded at runtime, not link time, to deal with references from __bug_table */ - .exit.text : { *(.exit.text) } + .exit.text : { EXIT_TEXT } .init.data : { - *(.init.data); + INIT_DATA __vtop_table_begin = .; *(.vtop_fixup); __vtop_table_end = .; @@ -164,6 +164,6 @@ SECTIONS /* Sections to be discarded. */ /DISCARD/ : { *(.exitcall.exit) - *(.exit.data) + EXIT_DATA } } diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 93615919934..7d43c3cd3ef 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -97,7 +97,7 @@ SECTIONS __init_begin = .; .init.text : { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } /* @@ -105,11 +105,11 @@ SECTIONS * to deal with references from __bug_table */ .exit.text : { - *(.exit.text) + EXIT_TEXT } .init.data : { - *(.init.data) + INIT_DATA } . 
= ALIGN(0x100); .init.setup : { @@ -156,7 +156,7 @@ SECTIONS /* Sections to be discarded */ /DISCARD/ : { - *(.exit.data) + EXIT_DATA *(.exitcall.exit) } diff --git a/arch/sh/kernel/vmlinux_32.lds.S b/arch/sh/kernel/vmlinux_32.lds.S index d549fac6d3e..c7113786ecd 100644 --- a/arch/sh/kernel/vmlinux_32.lds.S +++ b/arch/sh/kernel/vmlinux_32.lds.S @@ -84,9 +84,9 @@ SECTIONS . = ALIGN(PAGE_SIZE); /* Init code and data */ __init_begin = .; _sinittext = .; - .init.text : { *(.init.text) } + .init.text : { INIT_TEXT } _einittext = .; - .init.data : { *(.init.data) } + .init.data : { INIT_DATA } . = ALIGN(16); __setup_start = .; @@ -122,8 +122,8 @@ SECTIONS * .exit.text is discarded at runtime, not link time, to deal with * references from __bug_table */ - .exit.text : { *(.exit.text) } - .exit.data : { *(.exit.data) } + .exit.text : { EXIT_TEXT } + .exit.data : { EXIT_DATA } . = ALIGN(PAGE_SIZE); .bss : { diff --git a/arch/sh/kernel/vmlinux_64.lds.S b/arch/sh/kernel/vmlinux_64.lds.S index 2fd0f740148..3f1bd6392bb 100644 --- a/arch/sh/kernel/vmlinux_64.lds.S +++ b/arch/sh/kernel/vmlinux_64.lds.S @@ -96,9 +96,9 @@ SECTIONS . = ALIGN(PAGE_SIZE); /* Init code and data */ __init_begin = .; _sinittext = .; - .init.text : C_PHYS(.init.text) { *(.init.text) } + .init.text : C_PHYS(.init.text) { INIT_TEXT } _einittext = .; - .init.data : C_PHYS(.init.data) { *(.init.data) } + .init.data : C_PHYS(.init.data) { INIT_DATA } . = ALIGN(L1_CACHE_BYTES); /* Better if Cache Line aligned */ __setup_start = .; .init.setup : C_PHYS(.init.setup) { *(.init.setup) } @@ -134,8 +134,8 @@ SECTIONS * .exit.text is discarded at runtime, not link time, to deal with * references from __bug_table */ - .exit.text : C_PHYS(.exit.text) { *(.exit.text) } - .exit.data : C_PHYS(.exit.data) { *(.exit.data) } + .exit.text : C_PHYS(.exit.text) { EXIT_TEXT } + .exit.data : C_PHYS(.exit.data) { EXIT_DATA } . = ALIGN(PAGE_SIZE); .bss : C_PHYS(.bss) { diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index a8b4200f9cc..216147d6e61 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -48,12 +48,12 @@ SECTIONS __init_begin = .; .init.text : { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } __init_text_end = .; .init.data : { - *(.init.data) + INIT_DATA } . = ALIGN(16); .init.setup : { @@ -102,8 +102,8 @@ SECTIONS _end = . ; PROVIDE (end = .); /DISCARD/ : { - *(.exit.text) - *(.exit.data) + EXIT_TEXT + EXIT_DATA *(.exitcall.exit) } diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S index 9fcd503bc04..01f809617e5 100644 --- a/arch/sparc64/kernel/vmlinux.lds.S +++ b/arch/sparc64/kernel/vmlinux.lds.S @@ -56,11 +56,11 @@ SECTIONS .init.text : { __init_begin = .; _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } .init.data : { - *(.init.data) + INIT_DATA } . = ALIGN(16); .init.setup : { @@ -137,8 +137,8 @@ SECTIONS PROVIDE (end = .); /DISCARD/ : { - *(.exit.text) - *(.exit.data) + EXIT_TEXT + EXIT_DATA *(.exitcall.exit) } diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S index 3866f4960f0..26090b7f323 100644 --- a/arch/um/kernel/dyn.lds.S +++ b/arch/um/kernel/dyn.lds.S @@ -17,7 +17,7 @@ SECTIONS __init_begin = .; .init.text : { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } @@ -84,7 +84,7 @@ SECTIONS #include "asm/common.lds.S" - init.data : { *(.init.data) } + init.data : { INIT_DATA } /* Ensure the __preinit_array_start label is properly aligned. 
We could instead move the label definition inside the section, but diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S index 13df191e2b4..5828c1d5450 100644 --- a/arch/um/kernel/uml.lds.S +++ b/arch/um/kernel/uml.lds.S @@ -23,7 +23,7 @@ SECTIONS __init_begin = .; .init.text : { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } . = ALIGN(4096); @@ -48,7 +48,7 @@ SECTIONS #include "asm/common.lds.S" - init.data : { *(init.data) } + init.data : { INIT_DATA } .data : { . = ALIGN(KERNEL_STACK_SIZE); /* init_task */ diff --git a/arch/v850/kernel/vmlinux.lds.S b/arch/v850/kernel/vmlinux.lds.S index 6172599b4ce..d08cd1d27f2 100644 --- a/arch/v850/kernel/vmlinux.lds.S +++ b/arch/v850/kernel/vmlinux.lds.S @@ -114,7 +114,7 @@ #define DATA_CONTENTS \ __sdata = . ; \ DATA_DATA \ - *(.exit.data) /* 2.5 convention */ \ + EXIT_DATA /* 2.5 convention */ \ *(.data.exit) /* 2.4 convention */ \ . = ALIGN (16) ; \ *(.data.cacheline_aligned) \ @@ -157,9 +157,9 @@ . = ALIGN (4096) ; \ __init_start = . ; \ __sinittext = .; \ - *(.init.text) /* 2.5 convention */ \ + INIT_TEXT /* 2.5 convention */ \ __einittext = .; \ - *(.init.data) \ + INIT_DATA \ *(.text.init) /* 2.4 convention */ \ *(.data.init) \ INITCALL_CONTENTS \ @@ -170,7 +170,7 @@ #define ROMK_INIT_RAM_CONTENTS \ . = ALIGN (4096) ; \ __init_start = . ; \ - *(.init.data) /* 2.5 convention */ \ + INIT_DATA /* 2.5 convention */ \ *(.data.init) /* 2.4 convention */ \ __init_end = . ; \ . = ALIGN (4096) ; @@ -179,7 +179,7 @@ should go into ROM. */ #define ROMK_INIT_ROM_CONTENTS \ _sinittext = .; \ - *(.init.text) /* 2.5 convention */ \ + INIT_TEXT /* 2.5 convention */ \ _einittext = .; \ *(.text.init) /* 2.4 convention */ \ INITCALL_CONTENTS \ diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S index 7d72cce0052..84c913f38f9 100644 --- a/arch/x86/kernel/vmlinux_32.lds.S +++ b/arch/x86/kernel/vmlinux_32.lds.S @@ -131,10 +131,12 @@ SECTIONS .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { __init_begin = .; _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } - .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { *(.init.data) } + .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { + INIT_DATA + } . = ALIGN(16); .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { __setup_start = .; @@ -169,8 +171,12 @@ SECTIONS } /* .exit.text is discard at runtime, not link time, to deal with references from .altinstructions and .eh_frame */ - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { *(.exit.text) } - .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { *(.exit.data) } + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { + EXIT_TEXT + } + .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { + EXIT_DATA + } #if defined(CONFIG_BLK_DEV_INITRD) . = ALIGN(4096); .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S index ba8ea97abd2..ea5386944e6 100644 --- a/arch/x86/kernel/vmlinux_64.lds.S +++ b/arch/x86/kernel/vmlinux_64.lds.S @@ -155,12 +155,15 @@ SECTIONS __init_begin = .; .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } - __initdata_begin = .; - .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { *(.init.data) } - __initdata_end = .; + .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { + __initdata_begin = .; + INIT_DATA + __initdata_end = .; + } + . 
= ALIGN(16); __setup_start = .; .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { *(.init.setup) } @@ -187,8 +190,12 @@ SECTIONS } /* .exit.text is discard at runtime, not link time, to deal with references from .altinstructions and .eh_frame */ - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { *(.exit.text) } - .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { *(.exit.data) } + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { + EXIT_TEXT + } + .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { + EXIT_DATA + } /* vdso blob that is mapped into user space */ vdso_start = . ; diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index ac4ed52034d..7d0f55a4982 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S @@ -136,13 +136,13 @@ SECTIONS __init_begin = .; .init.text : { _sinittext = .; - *(.init.literal) *(.init.text) + *(.init.literal) INIT_TEXT _einittext = .; } .init.data : { - *(.init.data) + INIT_DATA . = ALIGN(0x4); __tagtable_begin = .; *(.taglist) @@ -278,8 +278,9 @@ SECTIONS /* Sections to be discarded */ /DISCARD/ : { - *(.exit.literal .exit.text) - *(.exit.data) + *(.exit.literal) + EXIT_TEXT + EXIT_DATA *(.exitcall.exit) } diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 9f584cc5c5f..ae0166e8349 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -183,6 +183,13 @@ *(.kprobes.text) \ VMLINUX_SYMBOL(__kprobes_text_end) = .; +/* init and exit section handling */ +#define INIT_TEXT *(.init.text) +#define INIT_DATA *(.init.data) +#define EXIT_TEXT *(.exit.text) +#define EXIT_DATA *(.exit.data) + + /* DWARF debug sections. Symbols in the DWARF debugging sections are relative to the beginning of the section so we begin them at 0. */ -- cgit v1.2.3-70-g09d2 From eb8f689046b857874e964463619f09df06d59fad Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 20 Jan 2008 20:07:28 +0100 Subject: Use separate sections for __dev/__cpu/__mem code/data Introducing separate sections for __dev* (HOTPLUG), __cpu* (HOTPLUG_CPU) and __mem* (MEMORY_HOTPLUG) allows us to do a much more reliable Section mismatch check in modpost; we are no longer dependent on the actual configuration of, for example, HOTPLUG. The effect is that all users see many more Section mismatch warnings than before, because almost all of them were hidden when HOTPLUG was enabled. The advantage is that when building a piece of code, Section mismatch errors are much more likely to be spotted, and the warnings feel less random. Signed-off-by: Sam Ravnborg Cc: Greg KH Cc: Randy Dunlap Cc: Adrian Bunk --- include/asm-generic/vmlinux.lds.h | 88 ++++++++++++++++++++++++++++++++++++--- include/linux/init.h | 77 +++++++++++++++++----------------- scripts/mod/modpost.c | 54 +++++++++++++++++------- 3 files changed, 159 insertions(+), 60 deletions(-) (limited to 'include/asm-generic') diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index ae0166e8349..e0a56fb8f81 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -9,10 +9,46 @@ /* Align . to a 8 byte boundary equals to maximum function alignment. */ #define ALIGN_FUNCTION() . 
= ALIGN(8) +/* The actual configuration determine if the init/exit sections + * are handled as text/data or they can be discarded (which + * often happens at runtime) + */ +#ifdef CONFIG_HOTPLUG +#define DEV_KEEP(sec) *(.dev##sec) +#define DEV_DISCARD(sec) +#else +#define DEV_KEEP(sec) +#define DEV_DISCARD(sec) *(.dev##sec) +#endif + +#ifdef CONFIG_HOTPLUG_CPU +#define CPU_KEEP(sec) *(.cpu##sec) +#define CPU_DISCARD(sec) +#else +#define CPU_KEEP(sec) +#define CPU_DISCARD(sec) *(.cpu##sec) +#endif + +#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_ACPI_HOTPLUG_MEMORY) \ + || defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE) +#define MEM_KEEP(sec) *(.mem##sec) +#define MEM_DISCARD(sec) +#else +#define MEM_KEEP(sec) +#define MEM_DISCARD(sec) *(.mem##sec) +#endif + + /* .data section */ #define DATA_DATA \ *(.data) \ *(.data.init.refok) \ + DEV_KEEP(init.data) \ + DEV_KEEP(exit.data) \ + CPU_KEEP(init.data) \ + CPU_KEEP(exit.data) \ + MEM_KEEP(init.data) \ + MEM_KEEP(exit.data) \ . = ALIGN(8); \ VMLINUX_SYMBOL(__start___markers) = .; \ *(__markers) \ @@ -132,6 +168,16 @@ *(__ksymtab_strings) \ } \ \ + /* __*init sections */ \ + __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \ + DEV_KEEP(init.rodata) \ + DEV_KEEP(exit.rodata) \ + CPU_KEEP(init.rodata) \ + CPU_KEEP(exit.rodata) \ + MEM_KEEP(init.rodata) \ + MEM_KEEP(exit.rodata) \ + } \ + \ /* Built-in module parameters. */ \ __param : AT(ADDR(__param) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___param) = .; \ @@ -139,7 +185,6 @@ VMLINUX_SYMBOL(__stop___param) = .; \ VMLINUX_SYMBOL(__end_rodata) = .; \ } \ - \ . = ALIGN((align)); /* RODATA provided for backward compatibility. @@ -159,7 +204,14 @@ ALIGN_FUNCTION(); \ *(.text) \ *(.text.init.refok) \ - *(.exit.text.refok) + *(.exit.text.refok) \ + DEV_KEEP(init.text) \ + DEV_KEEP(exit.text) \ + CPU_KEEP(init.text) \ + CPU_KEEP(exit.text) \ + MEM_KEEP(init.text) \ + MEM_KEEP(exit.text) + /* sched.text is aling to function alignment to secure we have same * address even at second ld pass when generating System.map */ @@ -184,11 +236,35 @@ VMLINUX_SYMBOL(__kprobes_text_end) = .; /* init and exit section handling */ -#define INIT_TEXT *(.init.text) -#define INIT_DATA *(.init.data) -#define EXIT_TEXT *(.exit.text) -#define EXIT_DATA *(.exit.data) +#define INIT_DATA \ + *(.init.data) \ + DEV_DISCARD(init.data) \ + DEV_DISCARD(init.rodata) \ + CPU_DISCARD(init.data) \ + CPU_DISCARD(init.rodata) \ + MEM_DISCARD(init.data) \ + MEM_DISCARD(init.rodata) + +#define INIT_TEXT \ + *(.init.text) \ + DEV_DISCARD(init.text) \ + CPU_DISCARD(init.text) \ + MEM_DISCARD(init.text) + +#define EXIT_DATA \ + *(.exit.data) \ + DEV_DISCARD(exit.data) \ + DEV_DISCARD(exit.rodata) \ + CPU_DISCARD(exit.data) \ + CPU_DISCARD(exit.rodata) \ + MEM_DISCARD(exit.data) \ + MEM_DISCARD(exit.rodata) +#define EXIT_TEXT \ + *(.exit.text) \ + DEV_DISCARD(exit.text) \ + CPU_DISCARD(exit.text) \ + MEM_DISCARD(exit.text) /* DWARF debug sections. 
Symbols in the DWARF debugging sections are relative to diff --git a/include/linux/init.h b/include/linux/init.h index 99807681840..dcb66c76bd4 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -60,18 +60,54 @@ #define __exit_refok noinline __section(.exit.text.refok) #ifdef MODULE -#define __exit __section(.exit.text) __cold +#define __exitused #else -#define __exit __attribute_used__ __section(.exit.text) __cold +#define __exitused __used #endif +#define __exit __section(.exit.text) __exitused __cold + +/* Used for HOTPLUG */ +#define __devinit __section(.devinit.text) __cold +#define __devinitdata __section(.devinit.data) +#define __devinitconst __section(.devinit.rodata) +#define __devexit __section(.devexit.text) __exitused __cold +#define __devexitdata __section(.devexit.data) +#define __devexitconst __section(.devexit.rodata) + +/* Used for HOTPLUG_CPU */ +#define __cpuinit __section(.cpuinit.text) __cold +#define __cpuinitdata __section(.cpuinit.data) +#define __cpuinitconst __section(.cpuinit.rodata) +#define __cpuexit __section(.cpuexit.text) __exitused __cold +#define __cpuexitdata __section(.cpuexit.data) +#define __cpuexitconst __section(.cpuexit.rodata) + +/* Used for MEMORY_HOTPLUG */ +#define __meminit __section(.meminit.text) __cold +#define __meminitdata __section(.meminit.data) +#define __meminitconst __section(.meminit.rodata) +#define __memexit __section(.memexit.text) __exitused __cold +#define __memexitdata __section(.memexit.data) +#define __memexitconst __section(.memexit.rodata) + /* For assembly routines */ #define __INIT .section ".init.text","ax" #define __INIT_REFOK .section ".text.init.refok","ax" #define __FINIT .previous + #define __INITDATA .section ".init.data","aw" #define __INITDATA_REFOK .section ".data.init.refok","aw" +#define __DEVINIT .section ".devinit.text", "ax" +#define __DEVINITDATA .section ".devinit.data", "aw" + +#define __CPUINIT .section ".cpuinit.text", "ax" +#define __CPUINITDATA .section ".cpuinit.data", "aw" + +#define __MEMINIT .section ".meminit.text", "ax" +#define __MEMINITDATA .section ".meminit.data", "aw" + #ifndef __ASSEMBLY__ /* * Used for initialization calls.. @@ -254,43 +290,6 @@ void __init parse_early_param(void); #define __initdata_or_module __initdata #endif /*CONFIG_MODULES*/ -#ifdef CONFIG_HOTPLUG -#define __devinit -#define __devinitdata -#define __devexit -#define __devexitdata -#else -#define __devinit __init -#define __devinitdata __initdata -#define __devexit __exit -#define __devexitdata __exitdata -#endif - -#ifdef CONFIG_HOTPLUG_CPU -#define __cpuinit -#define __cpuinitdata -#define __cpuexit -#define __cpuexitdata -#else -#define __cpuinit __init -#define __cpuinitdata __initdata -#define __cpuexit __exit -#define __cpuexitdata __exitdata -#endif - -#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_ACPI_HOTPLUG_MEMORY) \ - || defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE) -#define __meminit -#define __meminitdata -#define __memexit -#define __memexitdata -#else -#define __meminit __init -#define __meminitdata __initdata -#define __memexit __exit -#define __memexitdata __exitdata -#endif - /* Functions marked as __devexit may be discarded at kernel link time, depending on config options. Newer versions of binutils detect references from retained sections to discarded sections and flag an error. 
Pointers to diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 986513dcd70..730b321680c 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -670,27 +670,41 @@ int match(const char *sym, const char * const pat[]) static const char *section_white_list[] = { ".debug*", ".stab*", ".note*", ".got*", ".toc*", NULL }; -#define INIT_DATA_SECTIONS ".init.data$" -#define EXIT_DATA_SECTIONS ".exit.data$" +#define ALL_INIT_DATA_SECTIONS \ + ".init.data$", ".devinit.data$", ".cpuinit.data$", ".meminit.data$" +#define ALL_EXIT_DATA_SECTIONS \ + ".exit.data$", ".devexit.data$", ".cpuexit.data$", ".memexit.data$" -#define INIT_TEXT_SECTIONS ".init.text$" -#define EXIT_TEXT_SECTIONS ".exit.text$" +#define ALL_INIT_TEXT_SECTIONS \ + ".init.text$", ".devinit.text$", ".cpuinit.text$", ".meminit.text$" +#define ALL_EXIT_TEXT_SECTIONS \ + ".exit.text$", ".devexit.text$", ".cpuexit.text$", ".memexit.text$" -#define INIT_SECTIONS INIT_DATA_SECTIONS, INIT_TEXT_SECTIONS -#define EXIT_SECTIONS EXIT_DATA_SECTIONS, EXIT_TEXT_SECTIONS +#define ALL_INIT_SECTIONS ALL_INIT_DATA_SECTIONS, ALL_INIT_TEXT_SECTIONS +#define ALL_EXIT_SECTIONS ALL_EXIT_DATA_SECTIONS, ALL_EXIT_TEXT_SECTIONS #define DATA_SECTIONS ".data$", ".data.rel$" #define TEXT_SECTIONS ".text$" +#define INIT_SECTIONS ".init.data$", ".init.text$" +#define DEV_INIT_SECTIONS ".devinit.data$", ".devinit.text$" +#define CPU_INIT_SECTIONS ".cpuinit.data$", ".cpuinit.text$" +#define MEM_INIT_SECTIONS ".meminit.data$", ".meminit.text$" + +#define EXIT_SECTIONS ".exit.data$", ".exit.text$" +#define DEV_EXIT_SECTIONS ".devexit.data$", ".devexit.text$" +#define CPU_EXIT_SECTIONS ".cpuexit.data$", ".cpuexit.text$" +#define MEM_EXIT_SECTIONS ".memexit.data$", ".memexit.text$" + /* init data sections */ -static const char *init_data_sections[] = { INIT_DATA_SECTIONS, NULL }; +static const char *init_data_sections[] = { ALL_INIT_DATA_SECTIONS, NULL }; /* all init sections */ -static const char *init_sections[] = { INIT_SECTIONS, NULL }; +static const char *init_sections[] = { ALL_INIT_SECTIONS, NULL }; /* All init and exit sections (code + data) */ static const char *init_exit_sections[] = - {INIT_SECTIONS, EXIT_SECTIONS, NULL }; + {ALL_INIT_SECTIONS, ALL_EXIT_SECTIONS, NULL }; /* data section */ static const char *data_sections[] = { DATA_SECTIONS, NULL }; @@ -734,22 +748,32 @@ const struct sectioncheck sectioncheck[] = { */ { .fromsec = { TEXT_SECTIONS, DATA_SECTIONS, NULL }, - .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL } + .tosec = { ALL_INIT_SECTIONS, ALL_EXIT_SECTIONS, NULL } +}, +/* Do not reference init code/data from devinit/cpuinit/meminit code/data */ +{ + .fromsec = { DEV_INIT_SECTIONS, CPU_INIT_SECTIONS, MEM_INIT_SECTIONS, NULL }, + .tosec = { INIT_SECTIONS, NULL } +}, +/* Do not reference exit code/data from devexit/cpuexit/memexit code/data */ +{ + .fromsec = { DEV_EXIT_SECTIONS, CPU_EXIT_SECTIONS, MEM_EXIT_SECTIONS, NULL }, + .tosec = { EXIT_SECTIONS, NULL } }, /* Do not use exit code/data from init code */ { - .fromsec = { INIT_SECTIONS, NULL }, - .tosec = { EXIT_SECTIONS, NULL }, + .fromsec = { ALL_INIT_SECTIONS, NULL }, + .tosec = { ALL_EXIT_SECTIONS, NULL }, }, /* Do not use init code/data from exit code */ { - .fromsec = { EXIT_SECTIONS, NULL }, - .tosec = { INIT_SECTIONS, NULL } + .fromsec = { ALL_EXIT_SECTIONS, NULL }, + .tosec = { ALL_INIT_SECTIONS, NULL } }, /* Do not export init/exit functions or data */ { .fromsec = { "__ksymtab*", NULL }, - .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL } + .tosec = { 
ALL_INIT_SECTIONS, ALL_EXIT_SECTIONS, NULL } } }; -- cgit v1.2.3-70-g09d2 From 1a3fb6d481689d0482eacadcbe3205b49b423c11 Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Thu, 24 Jan 2008 22:20:18 +0100 Subject: asm-generic/vmlinux.lds.h: simplify __mem{init,exit}* dependencies Simplify the dependencies on __mem{init,exit}* (ACPI_HOTPLUG_MEMORY requires MEMORY_HOTPLUG). Signed-off-by: Adrian Bunk Signed-off-by: Sam Ravnborg --- include/asm-generic/vmlinux.lds.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'include/asm-generic') diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index e0a56fb8f81..29485305370 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -29,8 +29,7 @@ #define CPU_DISCARD(sec) *(.cpu##sec) #endif -#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_ACPI_HOTPLUG_MEMORY) \ - || defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE) +#if defined(CONFIG_MEMORY_HOTPLUG) #define MEM_KEEP(sec) *(.mem##sec) #define MEM_DISCARD(sec) #else -- cgit v1.2.3-70-g09d2 From 312b1485fb509c9bc32eda28ad29537896658cb8 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Mon, 28 Jan 2008 20:21:15 +0100 Subject: Introduce new section reference annotation tags: __ref, __refdata, __refconst

Today we have the following annotations for functions/data referencing __init/__exit functions/data:

  __init_refok     => for init functions
  __initdata_refok => for init data
  __exit_refok     => for exit functions

There is really no difference between the __init and __exit versions, so to simplify things and introduce shorter annotations the following new tags are introduced:

  __ref      => for functions (code) that reference __*init / __*exit
  __refdata  => for variables
  __refconst => for const variables

With this annotation it is more obvious what the annotation is for, and there is no longer the arbitrary division between __init and __exit code. The mechanism is the same as before - a special section is created which is made part of the usual sections in the linker script. We will start to see annotations like this:

-static struct pci_serial_quirk pci_serial_quirks[] = {
+static const struct pci_serial_quirk pci_serial_quirks[] __refconst = {
-----------------
-static struct notifier_block __cpuinitdata cpuid_class_cpu_notifier =
+static struct notifier_block cpuid_class_cpu_notifier __refdata =
----------------
-static int threshold_cpu_callback(struct notifier_block *nfb,
+static int __ref threshold_cpu_callback(struct notifier_block *nfb,

[The above are just random samples.] Note: No modifications were needed in modpost to support the new sections, due to the newly introduced blacklisting.
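For instance (a hypothetical sketch with made-up names, in the spirit of the samples above), a CPU hotplug notifier that legitimately points at __cpuinit code would now be written as:

	static int __cpuinit my_cpu_callback(struct notifier_block *nfb,
					     unsigned long action, void *hcpu)
	{
		/* ... bring the CPU's data up or down ... */
		return NOTIFY_OK;
	}

	/* lives in .ref.data, so modpost does not warn about the
	 * reference to the __cpuinit function above */
	static struct notifier_block my_cpu_notifier __refdata = {
		.notifier_call = my_cpu_callback,
	};
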
Signed-off-by: Sam Ravnborg --- include/asm-generic/vmlinux.lds.h | 3 +++ include/linux/init.h | 34 +++++++++++++++++++++++++++------- 2 files changed, 30 insertions(+), 7 deletions(-) (limited to 'include/asm-generic') diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 29485305370..76df771be58 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -42,6 +42,7 @@ #define DATA_DATA \ *(.data) \ *(.data.init.refok) \ + *(.ref.data) \ DEV_KEEP(init.data) \ DEV_KEEP(exit.data) \ CPU_KEEP(init.data) \ @@ -169,6 +170,7 @@ \ /* __*init sections */ \ __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \ + *(.ref.rodata) \ DEV_KEEP(init.rodata) \ DEV_KEEP(exit.rodata) \ CPU_KEEP(init.rodata) \ @@ -202,6 +204,7 @@ #define TEXT_TEXT \ ALIGN_FUNCTION(); \ *(.text) \ + *(.ref.text) \ *(.text.init.refok) \ *(.exit.text.refok) \ DEV_KEEP(init.text) \ diff --git a/include/linux/init.h b/include/linux/init.h index dde1eaa7766..2efbda01674 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -52,12 +52,26 @@ * when early init has completed so all such references are potential bugs. * For exit sections the same issue exists. * The following markers are used for the cases where the reference to - * the init/exit section (code or data) is valid and will teach modpost - * not to issue a warning. + * the *init / *exit section (code or data) is valid and will teach + * modpost not to issue a warning. * The markers follow same syntax rules as __init / __initdata. */ -#define __init_refok noinline __section(.text.init.refok) -#define __initdata_refok __section(.data.init.refok) -#define __exit_refok noinline __section(.exit.text.refok) +#define __ref __section(.ref.text) noinline +#define __refdata __section(.ref.data) +#define __refconst __section(.ref.rodata) + +/* backward compatibility note + * A few places hardcode the old section names: + * .text.init.refok + * .data.init.refok + * .exit.text.refok + * They should be converted to use the defines from this file + */ + +/* compatibility defines */ +#define __init_refok __ref +#define __initdata_refok __refdata +#define __exit_refok __ref + #ifdef MODULE #define __exitused @@ -93,11 +107,9 @@ /* For assembly routines */ #define __INIT .section ".init.text","ax" -#define __INIT_REFOK .section ".text.init.refok","ax" #define __FINIT .previous #define __INITDATA .section ".init.data","aw" -#define __INITDATA_REFOK .section ".data.init.refok","aw" #define __DEVINIT .section ".devinit.text", "ax" #define __DEVINITDATA .section ".devinit.data", "aw" @@ -108,6 +120,14 @@ #define __MEMINIT .section ".meminit.text", "ax" #define __MEMINITDATA .section ".meminit.data", "aw" +/* silence warnings when references are OK */ +#define __REF .section ".ref.text", "ax" +#define __REFDATA .section ".ref.data", "aw" +#define __REFCONST .section ".ref.rodata", "aw" +/* backward compatibility */ +#define __INIT_REFOK .section __REF +#define __INITDATA_REFOK .section __REFDATA + #ifndef __ASSEMBLY__ /* * Used for initialization calls.. -- cgit v1.2.3-70-g09d2 From aa02ad67d9b308290fde390682cd039b29f7ab85 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 28 Jan 2008 23:58:27 -0500 Subject: ext4: Add ext4_find_next_bit() This function is used by the ext4 multi block allocator patches. 
Also add generic_find_next_le_bit Signed-off-by: Aneesh Kumar K.V Cc: Signed-off-by: Andrew Morton --- include/asm-arm/bitops.h | 2 ++ include/asm-generic/bitops/ext2-non-atomic.h | 2 ++ include/asm-generic/bitops/le.h | 4 +++ include/asm-m68k/bitops.h | 2 ++ include/asm-m68knommu/bitops.h | 2 ++ include/asm-powerpc/bitops.h | 4 +++ include/asm-s390/bitops.h | 2 ++ include/linux/ext4_fs.h | 1 + lib/find_next_bit.c | 43 ++++++++++++++++++++++++++++ 9 files changed, 62 insertions(+) (limited to 'include/asm-generic') diff --git a/include/asm-arm/bitops.h b/include/asm-arm/bitops.h index 47a6b086eee..5c60bfc1a84 100644 --- a/include/asm-arm/bitops.h +++ b/include/asm-arm/bitops.h @@ -310,6 +310,8 @@ static inline int constant_fls(int x) _find_first_zero_bit_le(p,sz) #define ext2_find_next_zero_bit(p,sz,off) \ _find_next_zero_bit_le(p,sz,off) +#define ext2_find_next_bit(p, sz, off) \ + _find_next_bit_le(p, sz, off) /* * Minix is defined to use little-endian byte ordering. diff --git a/include/asm-generic/bitops/ext2-non-atomic.h b/include/asm-generic/bitops/ext2-non-atomic.h index 1697404afa0..63cf822431a 100644 --- a/include/asm-generic/bitops/ext2-non-atomic.h +++ b/include/asm-generic/bitops/ext2-non-atomic.h @@ -14,5 +14,7 @@ generic_find_first_zero_le_bit((unsigned long *)(addr), (size)) #define ext2_find_next_zero_bit(addr, size, off) \ generic_find_next_zero_le_bit((unsigned long *)(addr), (size), (off)) +#define ext2_find_next_bit(addr, size, off) \ + generic_find_next_le_bit((unsigned long *)(addr), (size), (off)) #endif /* _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ */ diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h index b9c7e5d2d2a..80e3bf13b2b 100644 --- a/include/asm-generic/bitops/le.h +++ b/include/asm-generic/bitops/le.h @@ -20,6 +20,8 @@ #define generic___test_and_clear_le_bit(nr, addr) __test_and_clear_bit(nr, addr) #define generic_find_next_zero_le_bit(addr, size, offset) find_next_zero_bit(addr, size, offset) +#define generic_find_next_le_bit(addr, size, offset) \ + find_next_bit(addr, size, offset) #elif defined(__BIG_ENDIAN) @@ -42,6 +44,8 @@ extern unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, unsigned long size, unsigned long offset); +extern unsigned long generic_find_next_le_bit(const unsigned long *addr, + unsigned long size, unsigned long offset); #else #error "Please fix " diff --git a/include/asm-m68k/bitops.h b/include/asm-m68k/bitops.h index 2976b5d68e9..83d1f286230 100644 --- a/include/asm-m68k/bitops.h +++ b/include/asm-m68k/bitops.h @@ -410,6 +410,8 @@ static inline int ext2_find_next_zero_bit(const void *vaddr, unsigned size, res = ext2_find_first_zero_bit (p, size - 32 * (p - addr)); return (p - addr) * 32 + res; } +#define ext2_find_next_bit(addr, size, off) \ + generic_find_next_le_bit((unsigned long *)(addr), (size), (off)) #endif /* __KERNEL__ */ diff --git a/include/asm-m68knommu/bitops.h b/include/asm-m68knommu/bitops.h index f8dfb7ba2e2..f43afe1fc3b 100644 --- a/include/asm-m68knommu/bitops.h +++ b/include/asm-m68knommu/bitops.h @@ -294,6 +294,8 @@ found_middle: return result + ffz(__swab32(tmp)); } +#define ext2_find_next_bit(addr, size, off) \ + generic_find_next_le_bit((unsigned long *)(addr), (size), (off)) #include #endif /* __KERNEL__ */ diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h index 733b4af7f4f..220d9a781ab 100644 --- a/include/asm-powerpc/bitops.h +++ b/include/asm-powerpc/bitops.h @@ -359,6 +359,8 @@ static __inline__ int test_le_bit(unsigned long nr, 
unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, unsigned long size, unsigned long offset); +unsigned long generic_find_next_le_bit(const unsigned long *addr, + unsigned long size, unsigned long offset); /* Bitmap functions for the ext2 filesystem */ #define ext2_set_bit(nr,addr) \ @@ -378,6 +380,8 @@ unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, #define ext2_find_next_zero_bit(addr, size, off) \ generic_find_next_zero_le_bit((unsigned long*)addr, size, off) +#define ext2_find_next_bit(addr, size, off) \ + generic_find_next_le_bit((unsigned long *)addr, size, off) /* Bitmap functions for the minix filesystem. */ #define minix_test_and_set_bit(nr,addr) \ diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h index 34d9a6357c3..dba6fecad0b 100644 --- a/include/asm-s390/bitops.h +++ b/include/asm-s390/bitops.h @@ -772,6 +772,8 @@ static inline int sched_find_first_bit(unsigned long *b) test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) #define ext2_test_bit(nr, addr) \ test_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) +#define ext2_find_next_bit(addr, size, off) \ + generic_find_next_le_bit((unsigned long *)(addr), (size), (off)) #ifndef __s390x__ diff --git a/include/linux/ext4_fs.h b/include/linux/ext4_fs.h index 213974f7c5d..d0b7ca99b91 100644 --- a/include/linux/ext4_fs.h +++ b/include/linux/ext4_fs.h @@ -493,6 +493,7 @@ do { \ #define ext4_test_bit ext2_test_bit #define ext4_find_first_zero_bit ext2_find_first_zero_bit #define ext4_find_next_zero_bit ext2_find_next_zero_bit +#define ext4_find_next_bit ext2_find_next_bit /* * Maximal mount counts between two filesystem checks diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c index bda0d71a251..78ccd73a884 100644 --- a/lib/find_next_bit.c +++ b/lib/find_next_bit.c @@ -178,4 +178,47 @@ found_middle_swap: EXPORT_SYMBOL(generic_find_next_zero_le_bit); +unsigned long generic_find_next_le_bit(const unsigned long *addr, unsigned + long size, unsigned long offset) +{ + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG - 1); + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset &= (BITS_PER_LONG - 1UL); + if (offset) { + tmp = ext2_swabp(p++); + tmp &= (~0UL << offset); + if (size < BITS_PER_LONG) + goto found_first; + if (tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + + while (size & ~(BITS_PER_LONG - 1)) { + tmp = *(p++); + if (tmp) + goto found_middle_swap; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + tmp = ext2_swabp(p); +found_first: + tmp &= (~0UL >> (BITS_PER_LONG - size)); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. */ +found_middle: + return result + __ffs(tmp); + +found_middle_swap: + return result + __ffs(ext2_swab(tmp)); +} +EXPORT_SYMBOL(generic_find_next_le_bit); #endif /* __BIG_ENDIAN */ -- cgit v1.2.3-70-g09d2 From 3a6a62f96f168d192fb0cc9c0b5ee2584740b32d Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Wed, 30 Jan 2008 13:32:50 +0100 Subject: debug: introduce __WARN() Introduce __WARN() in the generic case, so the generic WARN_ON() can use arch-specific code for when the condition is true. 
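Callers are unaffected since WARN_ON() still evaluates to the condition; only the warn path becomes pluggable (illustrative sketch, the arch hook name is made up):

	/* a hypothetical arch override, in its asm/bug.h: */
	#define __WARN()	my_arch_warn_trap()

	/* generic usage keeps working unchanged: */
	if (WARN_ON(ret < 0))		/* prints file:line plus a backtrace */
		return ret;
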
Signed-off-by: Olof Johansson Cc: Signed-off-by: Andrew Morton Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- include/asm-generic/bug.h | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) (limited to 'include/asm-generic') diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index d56fedbb457..1a0e1a7684b 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -31,14 +31,19 @@ struct bug_entry { #define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0) #endif -#ifndef HAVE_ARCH_WARN_ON +#ifndef __WARN +#define __WARN() do { \ + printk("WARNING: at %s:%d %s()\n", __FILE__, \ + __LINE__, __FUNCTION__); \ + dump_stack(); \ +} while (0) +#endif + +#ifndef WARN_ON #define WARN_ON(condition) ({ \ int __ret_warn_on = !!(condition); \ - if (unlikely(__ret_warn_on)) { \ - printk("WARNING: at %s:%d %s()\n", __FILE__, \ - __LINE__, __FUNCTION__); \ - dump_stack(); \ - } \ + if (unlikely(__ret_warn_on)) \ + __WARN(); \ unlikely(__ret_warn_on); \ }) #endif -- cgit v1.2.3-70-g09d2 From 79b4cc5ee7a8086ac2c9c0afa52e6d687ce1ffef Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Wed, 30 Jan 2008 13:32:50 +0100 Subject: debug: move WARN_ON() out of line A quick grep shows that there are currently 1145 instances of WARN_ON in the kernel. Currently, WARN_ON is pretty much entirely inlined, which makes it hard to enhance it without growing the size of the kernel (and getting Andrew unhappy). This patch builds on top of Olof's patch that introduces __WARN, and places the slowpath out of line. It also uses Ingo's suggestion not to use __FUNCTION__ but to use kallsyms to do the lookup; this saves a ton of extra space since gcc doesn't need to store the function string twice now:

   text    data     bss     dec     hex filename
3936367  833603  624736 5394706  525112 vmlinux.before
3917508  833603  624736 5375847  520767 vmlinux-slowpath

15Kb savings...
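Roughly, each of the 1145 call sites now expands to a test plus a function call instead of carrying its own printk and dump_stack (a simplified sketch of the two shapes, not literal generated code):

	/* before: inlined at every WARN_ON(cond) site */
	if (unlikely(cond)) {
		printk("WARNING: at %s:%d %s()\n", __FILE__, __LINE__, __FUNCTION__);
		dump_stack();
	}

	/* after: the body lives once, in kernel/panic.c */
	if (unlikely(cond))
		warn_on_slowpath(__FILE__, __LINE__);
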
Signed-off-by: Arjan van de Ven CC: Andrew Morton CC: Olof Johansson Acked-by: Matt Mackall Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- include/asm-generic/bug.h | 10 +++++----- kernel/panic.c | 15 +++++++++++++++ 2 files changed, 20 insertions(+), 5 deletions(-) (limited to 'include/asm-generic') diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index 1a0e1a7684b..2632328d864 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -32,11 +32,11 @@ struct bug_entry { #endif #ifndef __WARN -#define __WARN() do { \ - printk("WARNING: at %s:%d %s()\n", __FILE__, \ - __LINE__, __FUNCTION__); \ - dump_stack(); \ -} while (0) +#ifndef __ASSEMBLY__ +extern void warn_on_slowpath(const char *file, const int line); +#define WANT_WARN_ON_SLOWPATH +#endif +#define __WARN() warn_on_slowpath(__FILE__, __LINE__) #endif #ifndef WARN_ON diff --git a/kernel/panic.c b/kernel/panic.c index da4d6bac270..0ebea438278 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -20,6 +20,7 @@ #include #include #include +#include <linux/kallsyms.h> int panic_on_oops; int tainted; @@ -292,6 +293,20 @@ void oops_exit(void) (unsigned long long)oops_id); } +#ifdef WANT_WARN_ON_SLOWPATH +void warn_on_slowpath(const char *file, int line) +{ + char function[KSYM_SYMBOL_LEN]; + unsigned long caller = (unsigned long) __builtin_return_address(0); + + sprint_symbol(function, caller); + printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file, + line, function); + dump_stack(); +} +EXPORT_SYMBOL(warn_on_slowpath); +#endif + #ifdef CONFIG_CC_STACKPROTECTOR /* * Called when gcc's -fstack-protector feature is used, and -- cgit v1.2.3-70-g09d2 From b32ef636a59aad12f9f9b5dc34c93222842c58ba Mon Sep 17 00:00:00 2001 From: "travis@sgi.com" Date: Wed, 30 Jan 2008 13:32:51 +0100 Subject: percpu: use a kconfig variable to signal arch specific percpu setup The use of __GENERIC_PER_CPU is a bit problematic since arches may want to run their own percpu setup while using the generic percpu definitions. Replace it with a kconfig variable.
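In other words (a sketch of the resulting split, see the diffs below): an arch that sets the new symbol keeps the generic per_cpu() accessors but supplies its own area setup, while the generic setup_per_cpu_areas() in init/main.c is only built when the symbol is unset:

	#ifndef CONFIG_ARCH_SETS_UP_PER_CPU_AREA
	static void __init setup_per_cpu_areas(void)
	{
		/* generic allocation of NR_CPUS copies of .data.percpu */
	}
	#endif
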
Cc: Rusty Russell Cc: Andi Kleen Signed-off-by: Christoph Lameter Signed-off-by: Mike Travis Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/ia64/Kconfig | 3 +++ arch/powerpc/Kconfig | 3 +++ arch/sparc64/Kconfig | 3 +++ arch/x86/Kconfig | 3 +++ include/asm-generic/percpu.h | 1 - include/asm-s390/percpu.h | 2 -- include/asm-x86/percpu_32.h | 2 -- init/main.c | 4 ++-- 8 files changed, 14 insertions(+), 7 deletions(-) (limited to 'include/asm-generic') diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 4a81b7fb191..5a41e75ae1f 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -80,6 +80,9 @@ config GENERIC_TIME_VSYSCALL bool default y +config ARCH_SETS_UP_PER_CPU_AREA + def_bool y + config DMI bool default y diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index c17a194beb0..fb85f6b72fc 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -42,6 +42,9 @@ config GENERIC_HARDIRQS bool default y +config ARCH_SETS_UP_PER_CPU_AREA + def_bool PPC64 + config IRQ_PER_CPU bool default y diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig index 1e25bce0366..26f5791baa3 100644 --- a/arch/sparc64/Kconfig +++ b/arch/sparc64/Kconfig @@ -66,6 +66,9 @@ config AUDIT_ARCH bool default y +config ARCH_SETS_UP_PER_CPU_AREA + def_bool y + config ARCH_NO_VIRT_TO_BUS def_bool y diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 1de28e850aa..dc80a9f1f30 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -97,6 +97,9 @@ config GENERIC_TIME_VSYSCALL bool default X86_64 +config ARCH_SETS_UP_PER_CPU_AREA + def_bool X86_64 + config ARCH_SUPPORTS_OPROFILE bool default y diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index d85172e9ed4..b5e53b9ab1f 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h @@ -3,7 +3,6 @@ #include #include -#define __GENERIC_PER_CPU #ifdef CONFIG_SMP extern unsigned long __per_cpu_offset[NR_CPUS]; diff --git a/include/asm-s390/percpu.h b/include/asm-s390/percpu.h index 545857e6444..f94d0d3cdb2 100644 --- a/include/asm-s390/percpu.h +++ b/include/asm-s390/percpu.h @@ -4,8 +4,6 @@ #include #include -#define __GENERIC_PER_CPU - /* * s390 uses its own implementation for per cpu data, the offset of * the cpu local data area is cached in the cpu's lowcore memory. diff --git a/include/asm-x86/percpu_32.h b/include/asm-x86/percpu_32.h index a7ebd436f3c..3949586bf94 100644 --- a/include/asm-x86/percpu_32.h +++ b/include/asm-x86/percpu_32.h @@ -41,8 +41,6 @@ * PER_CPU(cpu_gdt_descr, %ebx) */ #ifdef CONFIG_SMP -/* Same as generic implementation except for optimized local access. */ -#define __GENERIC_PER_CPU /* This is used for other cpus to find our section. */ extern unsigned long __per_cpu_offset[]; diff --git a/init/main.c b/init/main.c index f287ca5862b..3f8aba291ed 100644 --- a/init/main.c +++ b/init/main.c @@ -363,7 +363,7 @@ static inline void smp_prepare_cpus(unsigned int maxcpus) { } #else -#ifdef __GENERIC_PER_CPU +#ifndef CONFIG_ARCH_SETS_UP_PER_CPU_AREA unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; EXPORT_SYMBOL(__per_cpu_offset); @@ -384,7 +384,7 @@ static void __init setup_per_cpu_areas(void) ptr += size; } } -#endif /* !__GENERIC_PER_CPU */ +#endif /* CONFIG_ARCH_SETS_UP_CPU_AREA */ /* Called by boot processor to activate the rest. 
*/ static void __init smp_init(void) -- cgit v1.2.3-70-g09d2 From 5280e004fc22314122c84978c0b6a741cf96dc0f Mon Sep 17 00:00:00 2001 From: "travis@sgi.com" Date: Wed, 30 Jan 2008 13:32:52 +0100 Subject: percpu: move arch XX_PER_CPU_XX definitions into linux/percpu.h - Special consideration for IA64: Add the ability to specify arch specific per cpu flags - remove .data.percpu attribute from DEFINE_PER_CPU for non-smp case. The arch definitions are all the same. So move them into linux/percpu.h. We cannot move DECLARE_PER_CPU since some include files just include asm/percpu.h to avoid include recursion problems. Cc: Rusty Russell Cc: Andi Kleen Signed-off-by: Christoph Lameter Signed-off-by: Mike Travis Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- include/asm-generic/percpu.h | 18 ------------------ include/asm-ia64/percpu.h | 24 ++---------------------- include/asm-powerpc/percpu.h | 17 ----------------- include/asm-s390/percpu.h | 18 ------------------ include/asm-sparc64/percpu.h | 16 ---------------- include/asm-x86/percpu_32.h | 12 ------------ include/asm-x86/percpu_64.h | 17 ----------------- include/linux/percpu.h | 24 ++++++++++++++++++++++++ 8 files changed, 26 insertions(+), 120 deletions(-) (limited to 'include/asm-generic') diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index b5e53b9ab1f..e038f13594e 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h @@ -9,15 +9,6 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; #define per_cpu_offset(x) (__per_cpu_offset[x]) -/* Separate out the type, so (int[3], foo) works. */ -#define DEFINE_PER_CPU(type, name) \ - __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name - -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ - __attribute__((__section__(".data.percpu.shared_aligned"))) \ - __typeof__(type) per_cpu__##name \ - ____cacheline_aligned_in_smp - /* var is in discarded region: offset to particular copy we want */ #define per_cpu(var, cpu) (*({ \ extern int simple_identifier_##var(void); \ @@ -35,12 +26,6 @@ do { \ } while (0) #else /* ! SMP */ -#define DEFINE_PER_CPU(type, name) \ - __typeof__(type) per_cpu__##name - -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ - DEFINE_PER_CPU(type, name) - #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var)) #define __get_cpu_var(var) per_cpu__##var #define __raw_get_cpu_var(var) per_cpu__##var @@ -49,7 +34,4 @@ do { \ #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name -#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) -#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) - #endif /* _ASM_GENERIC_PERCPU_H_ */ diff --git a/include/asm-ia64/percpu.h b/include/asm-ia64/percpu.h index c4f1e328a5b..0095bcf7984 100644 --- a/include/asm-ia64/percpu.h +++ b/include/asm-ia64/percpu.h @@ -16,28 +16,11 @@ #include #ifdef HAVE_MODEL_SMALL_ATTRIBUTE -# define __SMALL_ADDR_AREA __attribute__((__model__ (__small__))) -#else -# define __SMALL_ADDR_AREA +# define PER_CPU_ATTRIBUTES __attribute__((__model__ (__small__))) #endif #define DECLARE_PER_CPU(type, name) \ - extern __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name - -/* Separate out the type, so (int[3], foo) works. 
*/ -#define DEFINE_PER_CPU(type, name) \ - __attribute__((__section__(".data.percpu"))) \ - __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name - -#ifdef CONFIG_SMP -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ - __attribute__((__section__(".data.percpu.shared_aligned"))) \ - __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name \ - ____cacheline_aligned_in_smp -#else -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ - DEFINE_PER_CPU(type, name) -#endif + extern PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name /* * Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an @@ -68,9 +51,6 @@ extern void *per_cpu_init(void); #endif /* SMP */ -#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) -#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) - /* * Be extremely careful when taking the address of this variable! Due to virtual * remapping, it is different from the canonical address returned by __get_cpu_var(var)! diff --git a/include/asm-powerpc/percpu.h b/include/asm-powerpc/percpu.h index 6b229626d3f..cc1cbf656b0 100644 --- a/include/asm-powerpc/percpu.h +++ b/include/asm-powerpc/percpu.h @@ -16,15 +16,6 @@ #define __my_cpu_offset() get_paca()->data_offset #define per_cpu_offset(x) (__per_cpu_offset(x)) -/* Separate out the type, so (int[3], foo) works. */ -#define DEFINE_PER_CPU(type, name) \ - __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name - -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ - __attribute__((__section__(".data.percpu.shared_aligned"))) \ - __typeof__(type) per_cpu__##name \ - ____cacheline_aligned_in_smp - /* var is in discarded region: offset to particular copy we want */ #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu))) #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset())) @@ -43,11 +34,6 @@ extern void setup_per_cpu_areas(void); #else /* ! SMP */ -#define DEFINE_PER_CPU(type, name) \ - __typeof__(type) per_cpu__##name -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ - DEFINE_PER_CPU(type, name) - #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var)) #define __get_cpu_var(var) per_cpu__##var #define __raw_get_cpu_var(var) per_cpu__##var @@ -56,9 +42,6 @@ extern void setup_per_cpu_areas(void); #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name -#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) -#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) - #else #include #endif diff --git a/include/asm-s390/percpu.h b/include/asm-s390/percpu.h index f94d0d3cdb2..2d676a87385 100644 --- a/include/asm-s390/percpu.h +++ b/include/asm-s390/percpu.h @@ -34,16 +34,6 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; -/* Separate out the type, so (int[3], foo) works. */ -#define DEFINE_PER_CPU(type, name) \ - __attribute__((__section__(".data.percpu"))) \ - __typeof__(type) per_cpu__##name - -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ - __attribute__((__section__(".data.percpu.shared_aligned"))) \ - __typeof__(type) per_cpu__##name \ - ____cacheline_aligned_in_smp - #define __get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset) #define __raw_get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset) #define per_cpu(var,cpu) __reloc_hide(var,__per_cpu_offset[cpu]) @@ -60,11 +50,6 @@ do { \ #else /* ! 
SMP */ -#define DEFINE_PER_CPU(type, name) \ - __typeof__(type) per_cpu__##name -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ - DEFINE_PER_CPU(type, name) - #define __get_cpu_var(var) __reloc_hide(var,0) #define __raw_get_cpu_var(var) __reloc_hide(var,0) #define per_cpu(var,cpu) __reloc_hide(var,0) @@ -73,7 +58,4 @@ do { \ #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name -#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) -#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) - #endif /* __ARCH_S390_PERCPU__ */ diff --git a/include/asm-sparc64/percpu.h b/include/asm-sparc64/percpu.h index a1f53a4da40..c7e52decba9 100644 --- a/include/asm-sparc64/percpu.h +++ b/include/asm-sparc64/percpu.h @@ -16,15 +16,6 @@ extern unsigned long __per_cpu_shift; (__per_cpu_base + ((unsigned long)(__cpu) << __per_cpu_shift)) #define per_cpu_offset(x) (__per_cpu_offset(x)) -/* Separate out the type, so (int[3], foo) works. */ -#define DEFINE_PER_CPU(type, name) \ - __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name - -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ - __attribute__((__section__(".data.percpu.shared_aligned"))) \ - __typeof__(type) per_cpu__##name \ - ____cacheline_aligned_in_smp - /* var is in discarded region: offset to particular copy we want */ #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu))) #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __local_per_cpu_offset)) @@ -41,10 +32,6 @@ do { \ #else /* ! SMP */ #define real_setup_per_cpu_areas() do { } while (0) -#define DEFINE_PER_CPU(type, name) \ - __typeof__(type) per_cpu__##name -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ - DEFINE_PER_CPU(type, name) #define per_cpu(var, cpu) (*((void)cpu, &per_cpu__##var)) #define __get_cpu_var(var) per_cpu__##var @@ -54,7 +41,4 @@ do { \ #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name -#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) -#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) - #endif /* __ARCH_SPARC64_PERCPU__ */ diff --git a/include/asm-x86/percpu_32.h b/include/asm-x86/percpu_32.h index 3949586bf94..77bd0045f33 100644 --- a/include/asm-x86/percpu_32.h +++ b/include/asm-x86/percpu_32.h @@ -47,16 +47,7 @@ extern unsigned long __per_cpu_offset[]; #define per_cpu_offset(x) (__per_cpu_offset[x]) -/* Separate out the type, so (int[3], foo) works. */ #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name -#define DEFINE_PER_CPU(type, name) \ - __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name - -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ - __attribute__((__section__(".data.percpu.shared_aligned"))) \ - __typeof__(type) per_cpu__##name \ - ____cacheline_aligned_in_smp - /* We can use this directly for local CPU (faster). 
*/ DECLARE_PER_CPU(unsigned long, this_cpu_off); @@ -81,9 +72,6 @@ do { \ (src), (size)); \ } while (0) -#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) -#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) - /* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */ #define __percpu_seg "%%fs:" #else /* !SMP */ diff --git a/include/asm-x86/percpu_64.h b/include/asm-x86/percpu_64.h index 5abd4827010..24fe7075248 100644 --- a/include/asm-x86/percpu_64.h +++ b/include/asm-x86/percpu_64.h @@ -16,15 +16,6 @@ #define per_cpu_offset(x) (__per_cpu_offset(x)) -/* Separate out the type, so (int[3], foo) works. */ -#define DEFINE_PER_CPU(type, name) \ - __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name - -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ - __attribute__((__section__(".data.percpu.shared_aligned"))) \ - __typeof__(type) per_cpu__##name \ - ____cacheline_internodealigned_in_smp - /* var is in discarded region: offset to particular copy we want */ #define per_cpu(var, cpu) (*({ \ extern int simple_identifier_##var(void); \ @@ -49,11 +40,6 @@ extern void setup_per_cpu_areas(void); #else /* ! SMP */ -#define DEFINE_PER_CPU(type, name) \ - __typeof__(type) per_cpu__##name -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ - DEFINE_PER_CPU(type, name) - #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var)) #define __get_cpu_var(var) per_cpu__##var #define __raw_get_cpu_var(var) per_cpu__##var @@ -62,7 +48,4 @@ extern void setup_per_cpu_areas(void); #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name -#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) -#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) - #endif /* _ASM_X8664_PERCPU_H_ */ diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 926adaae0f9..00412bb494c 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -9,6 +9,30 @@ #include +#ifndef PER_CPU_ATTRIBUTES +#define PER_CPU_ATTRIBUTES +#endif + +#ifdef CONFIG_SMP +#define DEFINE_PER_CPU(type, name) \ + __attribute__((__section__(".data.percpu"))) \ + PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name + +#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ + __attribute__((__section__(".data.percpu.shared_aligned"))) \ + PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name \ + ____cacheline_aligned_in_smp +#else +#define DEFINE_PER_CPU(type, name) \ + PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name + +#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ + DEFINE_PER_CPU(type, name) +#endif + +#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) +#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) + /* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */ #ifndef PERCPU_ENOUGH_ROOM #ifdef CONFIG_MODULES -- cgit v1.2.3-70-g09d2 From acdac87202a408133ee8f7985076de9d2e0dc5ab Mon Sep 17 00:00:00 2001 From: "travis@sgi.com" Date: Wed, 30 Jan 2008 13:32:52 +0100 Subject: percpu: make the asm-generic/percpu.h more "generic" - add support for PER_CPU_ATTRIBUTES - fix generic smp percpu_modcopy to use per_cpu_offset() macro. Add the ability to use generic/percpu even if the arch needs to override several aspects of its operations. This will enable the use of generic percpu.h for all arches. An arch may define: __per_cpu_offset Do not use the generic pointer array. Arch must define per_cpu_offset(cpu) (used by x86_64, s390). 
__my_cpu_offset Can be defined to provide an optimized way to determine the offset for variables of the currently executing processor. Used by ia64, x86_64, x86_32, sparc64, s/390. SHIFT_PERCPU_PTR(ptr, offset) If an arch defines it then special handling of pointer arithmetic may be implemented. Used by s/390. (Some of these special percpu arch implementations may be later consolidated so that there are fewer cases to deal with.) Cc: Rusty Russell Cc: Andi Kleen Signed-off-by: Christoph Lameter Signed-off-by: Mike Travis Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/ia64/kernel/module.c | 2 +- include/asm-generic/percpu.h | 74 +++++++++++++++++++++++++++++++++++++------- 2 files changed, 64 insertions(+), 12 deletions(-) (limited to 'include/asm-generic') diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c index 196287928ba..e699eb6c44b 100644 --- a/arch/ia64/kernel/module.c +++ b/arch/ia64/kernel/module.c @@ -947,7 +947,7 @@ percpu_modcopy (void *pcpudst, const void *src, unsigned long size) { unsigned int i; for_each_possible_cpu(i) { - memcpy(pcpudst + __per_cpu_offset[i], src, size); + memcpy(pcpudst + per_cpu_offset(i), src, size); } } #endif /* CONFIG_SMP */ diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index e038f13594e..c41b1a73112 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h @@ -3,35 +3,87 @@ #include #include +/* + * Determine the real variable name from the name visible in the + * kernel sources. + */ +#define per_cpu_var(var) per_cpu__##var + #ifdef CONFIG_SMP +/* + * per_cpu_offset() is the offset that has to be added to a + * percpu variable to get to the instance for a certain processor. + * + * Most arches use the __per_cpu_offset array for those offsets but + * some arches have their own ways of determining the offset (x86_64, s390). + */ +#ifndef __per_cpu_offset extern unsigned long __per_cpu_offset[NR_CPUS]; #define per_cpu_offset(x) (__per_cpu_offset[x]) +#endif + +/* + * Determine the offset for the currently active processor. + * An arch may define __my_cpu_offset to provide a more effective + * means of obtaining the offset to the per cpu variables of the + * current processor. + */ +#ifndef __my_cpu_offset +#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id()) +#define my_cpu_offset per_cpu_offset(smp_processor_id()) +#else +#define my_cpu_offset __my_cpu_offset +#endif + +/* + * Add a offset to a pointer but keep the pointer as is. + * + * Only S390 provides its own means of moving the pointer. + */ +#ifndef SHIFT_PERCPU_PTR +#define SHIFT_PERCPU_PTR(__p, __offset) RELOC_HIDE((__p), (__offset)) +#endif -/* var is in discarded region: offset to particular copy we want */ -#define per_cpu(var, cpu) (*({ \ extern int simple_identifier_##var(void); \ RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]); })) -#define __get_cpu_var(var) per_cpu(var, smp_processor_id()) -#define __raw_get_cpu_var(var) per_cpu(var, raw_smp_processor_id()) +/* + * A percpu variable may point to a discarded reghions. The following are + * established ways to produce a usable pointer from the percpu variable + * offset.
+ */ +#define per_cpu(var, cpu) \ + (*SHIFT_PERCPU_PTR(&per_cpu_var(var), per_cpu_offset(cpu))) +#define __get_cpu_var(var) \ + (*SHIFT_PERCPU_PTR(&per_cpu_var(var), my_cpu_offset)) +#define __raw_get_cpu_var(var) \ + (*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset)) + + +#ifdef CONFIG_ARCH_SETS_UP_PER_CPU_AREA +extern void setup_per_cpu_areas(void); +#endif /* A macro to avoid #include hell... */ #define percpu_modcopy(pcpudst, src, size) \ do { \ unsigned int __i; \ for_each_possible_cpu(__i) \ - memcpy((pcpudst)+__per_cpu_offset[__i], \ + memcpy((pcpudst)+per_cpu_offset(__i), \ (src), (size)); \ } while (0) #else /* ! SMP */ -#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var)) -#define __get_cpu_var(var) per_cpu__##var -#define __raw_get_cpu_var(var) per_cpu__##var +#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var))) +#define __get_cpu_var(var) per_cpu_var(var) +#define __raw_get_cpu_var(var) per_cpu_var(var) #endif /* SMP */ -#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name +#ifndef PER_CPU_ATTRIBUTES +#define PER_CPU_ATTRIBUTES +#endif + +#define DECLARE_PER_CPU(type, name) extern PER_CPU_ATTRIBUTES \ + __typeof__(type) per_cpu_var(name) #endif /* _ASM_GENERIC_PERCPU_H_ */ -- cgit v1.2.3-70-g09d2 From dd5af90a7f3d79e04b7eace9a98644dbf2038f4d Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Wed, 30 Jan 2008 13:33:32 +0100 Subject: x86/non-x86: percpu, node ids, apic ids x86.git fixup Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/Kconfig | 2 +- include/asm-generic/percpu.h | 12 ++---------- init/main.c | 4 ++-- kernel/module.c | 8 ++++++++ 4 files changed, 13 insertions(+), 13 deletions(-) (limited to 'include/asm-generic') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index f0887d12a5b..8e1b33c5405 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -97,7 +97,7 @@ config GENERIC_TIME_VSYSCALL bool default X86_64 -config ARCH_SETS_UP_PER_CPU_AREA +config HAVE_SETUP_PER_CPU_AREA def_bool X86_64 config ARCH_SUPPORTS_OPROFILE diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index c41b1a73112..4b8d31cda1a 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h @@ -47,7 +47,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; #endif /* - * A percpu variable may point to a discarded reghions. The following are + * A percpu variable may point to a discarded regions. The following are * established ways to produce a usable pointer from the percpu variable * offset. */ @@ -59,18 +59,10 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; (*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset)) -#ifdef CONFIG_ARCH_SETS_UP_PER_CPU_AREA +#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA extern void setup_per_cpu_areas(void); #endif -/* A macro to avoid #include hell... */ -#define percpu_modcopy(pcpudst, src, size) \ -do { \ - unsigned int __i; \ - for_each_possible_cpu(__i) \ - memcpy((pcpudst)+per_cpu_offset(__i), \ - (src), (size)); \ -} while (0) #else /* ! 
SMP */ #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var))) diff --git a/init/main.c b/init/main.c index 5843fe99670..3316dffe3e5 100644 --- a/init/main.c +++ b/init/main.c @@ -363,7 +363,7 @@ static inline void smp_prepare_cpus(unsigned int maxcpus) { } #else -#ifndef CONFIG_ARCH_SETS_UP_PER_CPU_AREA +#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; EXPORT_SYMBOL(__per_cpu_offset); @@ -384,7 +384,7 @@ static void __init setup_per_cpu_areas(void) ptr += size; } } -#endif /* CONFIG_ARCH_SETS_UP_CPU_AREA */ +#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ /* Called by boot processor to activate the rest. */ static void __init smp_init(void) diff --git a/kernel/module.c b/kernel/module.c index f6a4e721fd4..bd60278ee70 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -430,6 +430,14 @@ static unsigned int find_pcpusec(Elf_Ehdr *hdr, return find_sec(hdr, sechdrs, secstrings, ".data.percpu"); } +static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size) +{ + int cpu; + + for_each_possible_cpu(cpu) + memcpy(pcpudest + per_cpu_offset(cpu), from, size); +} + static int percpu_modinit(void) { pcpu_num_used = 2; -- cgit v1.2.3-70-g09d2 From a5a19c63f4e55e32dc0bc3d936d7f94793d8b380 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Wed, 30 Jan 2008 13:33:39 +0100 Subject: x86: demacro asm-x86/pgalloc_32.h Convert macros into inline functions, for better type-checking. This patch required a little bit of fiddling with headers in order to make __(pte|pmd)_free_tlb inline rather than macros. asm-generic/tlb.h includes asm/pgalloc.h, though it doesn't directly use any pgalloc definitions. I removed this include to avoid an include cycle, but it may cause secondary compile failures by things depending on the indirect inclusion; arch/x86/mm/hugetlbpage.c was one such place; there may be others. 
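A side note on why the demacroizing matters: a function-like macro accepts any argument that makes the expansion parse, while a static inline function gets full compile-time argument type checking. A small illustrative sketch with invented types (not the kernel's pgalloc code):

#include <stdio.h>

struct pmd { unsigned long val; };
struct pte { unsigned long val; };

/* Macro version: any argument that makes the expansion parse is accepted,
 * so a struct pte * slips through set_entry_macro() without a diagnostic. */
#define set_entry_macro(entry, v) ((entry)->val = (v))

/* Inline version: the parameter type is enforced by the compiler. */
static inline void set_entry(struct pmd *pmd, unsigned long v)
{
	pmd->val = v;
}

int main(void)
{
	struct pmd pmd = { 0 };
	struct pte pte = { 0 };

	set_entry_macro(&pte, 1);	/* wrong type, but compiles silently */
	set_entry(&pmd, 2);		/* type-checked by the compiler */
	printf("pmd=%lu pte=%lu\n", pmd.val, pte.val);
	return 0;
}

With the inline version, passing &pte is rejected at compile time instead of silently writing through the wrong structure; that is the safety the conversion below buys at zero runtime cost.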
Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/mm/hugetlbpage.c | 3 +- arch/x86/mm/init_32.c | 1 + include/asm-generic/tlb.h | 1 - include/asm-x86/pgalloc_32.h | 61 +++++++++++++++++++++++++--------------- include/asm-x86/pgtable-3level.h | 2 -- include/linux/swap.h | 1 + 6 files changed, 43 insertions(+), 26 deletions(-) (limited to 'include/asm-generic') diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index 6c06d9c0488..4fbafb4bc2f 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -15,6 +15,7 @@ #include #include #include +#include static unsigned long page_table_shareable(struct vm_area_struct *svma, struct vm_area_struct *vma, @@ -88,7 +89,7 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) spin_lock(&mm->page_table_lock); if (pud_none(*pud)) - pud_populate(mm, pud, (unsigned long) spte & PAGE_MASK); + pud_populate(mm, pud, (pmd_t *)((unsigned long)spte & PAGE_MASK)); else put_page(virt_to_page(spte)); spin_unlock(&mm->page_table_lock); diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 98d2acae4f6..459b384acda 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 75f2bfab614..6ce9f3ab928 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -15,7 +15,6 @@ #include #include -#include #include /* diff --git a/include/asm-x86/pgalloc_32.h b/include/asm-x86/pgalloc_32.h index fbc6357f5eb..3482c342789 100644 --- a/include/asm-x86/pgalloc_32.h +++ b/include/asm-x86/pgalloc_32.h @@ -3,6 +3,8 @@ #include #include /* for struct page */ +#include +#include #ifdef CONFIG_PARAVIRT #include @@ -14,19 +16,20 @@ #define paravirt_release_pd(pfn) do { } while (0) #endif -#define pmd_populate_kernel(mm, pmd, pte) \ -do { \ - paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT); \ - set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte))); \ -} while (0) +static inline void pmd_populate_kernel(struct mm_struct *mm, + pmd_t *pmd, pte_t *pte) +{ + paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT); + set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE)); +} -#define pmd_populate(mm, pmd, pte) \ -do { \ - paravirt_alloc_pt(mm, page_to_pfn(pte)); \ - set_pmd(pmd, __pmd(_PAGE_TABLE + \ - ((unsigned long long)page_to_pfn(pte) << \ - (unsigned long long) PAGE_SHIFT))); \ -} while (0) +static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte) +{ + unsigned long pfn = page_to_pfn(pte); + + paravirt_alloc_pt(mm, pfn); + set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE)); +} /* * Allocate and free page tables. @@ -48,20 +51,34 @@ static inline void pte_free(struct page *pte) } -#define __pte_free_tlb(tlb,pte) \ -do { \ - paravirt_release_pt(page_to_pfn(pte)); \ - tlb_remove_page((tlb),(pte)); \ -} while (0) +static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte) +{ + paravirt_release_pt(page_to_pfn(pte)); + tlb_remove_page(tlb, pte); +} #ifdef CONFIG_X86_PAE /* * In the PAE case we free the pmds as part of the pgd. 
*/ -#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); }) -#define pmd_free(x) do { } while (0) -#define __pmd_free_tlb(tlb,x) do { } while (0) -#define pud_populate(mm, pmd, pte) BUG() -#endif +static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) +{ + BUG(); + return (pmd_t *)2; +} + +static inline void pmd_free(pmd_t *pmd) +{ +} + +static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) +{ +} + +static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) +{ + BUG(); +} +#endif /* CONFIG_X86_PAE */ #endif /* _I386_PGALLOC_H */ diff --git a/include/asm-x86/pgtable-3level.h b/include/asm-x86/pgtable-3level.h index 5af7a44ed90..62bb06575d5 100644 --- a/include/asm-x86/pgtable-3level.h +++ b/include/asm-x86/pgtable-3level.h @@ -157,6 +157,4 @@ static inline unsigned long pte_pfn(pte_t pte) #define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high }) #define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } }) -#define __pmd_free_tlb(tlb, x) do { } while (0) - #endif /* _I386_PGTABLE_3LEVEL_H */ diff --git a/include/linux/swap.h b/include/linux/swap.h index 4f3838adbb3..2c3ce4c69b2 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -6,6 +6,7 @@ #include #include #include +#include #include #include -- cgit v1.2.3-70-g09d2 From edeed30589f5defe63ce6aaae56f2b7c855e4520 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Wed, 30 Jan 2008 13:34:08 +0100 Subject: x86: add testcases for RODATA and NX protections/attributes Latest update; I now have 4 NX tests, but 2 fail so they're #if 0'd. I also cleaned up the NX test code quite a bit, and got rid of the ugly exception table sorting stuff. From: Arjan van de Ven This patch adds testcases for the CONFIG_DEBUG_RODATA configuration option as well as the NX CPU feature/mappings. Both testcases can move to tests/ once that patch gets merged into mainline. (I'm half considering moving the rodata test into mm/init.c but I'll hold off on that until init.c is unified) As part of this I had to fix a not-quite-right alignment in the vmlinux.lds.h for the RODATA sections, which led to one page less being marked read-only. Signed-off-by: Arjan van de Ven Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/test_nx.c | 176 ++++++++++++++++++++++++++++++++++++++ arch/x86/kernel/test_rodata.c | 86 +++++++++++++++++++ arch/x86/mm/init_32.c | 3 + arch/x86/mm/init_64.c | 3 + include/asm-generic/vmlinux.lds.h | 1 + include/asm-x86/cacheflush.h | 7 ++ 6 files changed, 276 insertions(+) create mode 100644 arch/x86/kernel/test_nx.c create mode 100644 arch/x86/kernel/test_rodata.c (limited to 'include/asm-generic') diff --git a/arch/x86/kernel/test_nx.c b/arch/x86/kernel/test_nx.c new file mode 100644 index 00000000000..6d7ef11e797 --- /dev/null +++ b/arch/x86/kernel/test_nx.c @@ -0,0 +1,176 @@ +/* + * test_nx.c: functional test for NX functionality + * + * (C) Copyright 2008 Intel Corporation + * Author: Arjan van de Ven + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License.
+ */ +#include +#include +#include + +extern int rodata_test_data; + +/* + * This file checks 4 things: + * 1) Check if the stack is not executable + * 2) Check if kmalloc memory is not executable + * 3) Check if the .rodata section is not executable + * 4) Check if the .data section of a module is not executable + * + * To do this, the test code tries to execute memory in stack/kmalloc/etc, + * and then checks if the expected trap happens. + * + * Sadly, this implies having a dynamic exception handling table entry. + * ... which can be done (and will make Rusty cry)... but it can only + * be done in a stand-alone module with only 1 entry total. + * (otherwise we'd have to sort and that's just too messy) + */ + + + +/* + * We want to set up an exception handling point on our stack, + * which means a variable value. This function is rather dirty + * and walks the exception table of the module, looking for a magic + * marker and replaces it with a specific function. + */ +static void fudze_exception_table(void *marker, void *new) +{ + struct module *mod = THIS_MODULE; + struct exception_table_entry *extable; + + /* + * Note: This module has only 1 exception table entry, + * so searching and sorting is not needed. If that changes, + * this would be the place to search and re-sort the exception + * table. + */ + if (mod->num_exentries > 1) { + printk(KERN_ERR "test_nx: too many exception table entries!\n"); + printk(KERN_ERR "test_nx: test results are not reliable.\n"); + return; + } + extable = (struct exception_table_entry *)mod->extable; + extable[0].insn = (unsigned long)new; +} + + +/* + * exception tables get their symbols translated so we need + * to use a fake function to put in there, which we can then + * replace at runtime. + */ +void foo_label(void); + +/* + * returns 0 for not-executable, negative for executable + * + * Note: we cannot allow this function to be inlined, because + * that would give us more than 1 exception table entry. + * This in turn would break the assumptions above. 
+ */ +static noinline int test_address(void *address) +{ + unsigned long result; + + /* Set up an exception table entry for our address */ + fudze_exception_table(&foo_label, address); + result = 1; + asm volatile( + "foo_label:\n" + "0: call *%[fake_code]\n" + "1:\n" + ".section .fixup,\"ax\"\n" + "2: mov %[zero], %[rslt]\n" + " ret\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + " .align 8\n" + " .quad 0b\n" + " .quad 2b\n" + ".previous\n" + : [rslt] "=r" (result) + : [fake_code] "r" (address), [zero] "r" (0UL), "0" (result) + ); + /* change the exception table back for the next round */ + fudze_exception_table(address, &foo_label); + + if (result) + return -ENODEV; + return 0; +} + +static unsigned char test_data = 0xC3; /* 0xC3 is the opcode for "ret" */ + +static int test_NX(void) +{ + int ret = 0; + /* 0xC3 is the opcode for "ret" */ + char stackcode[] = {0xC3, 0x90, 0 }; + char *heap; + + test_data = 0xC3; + + printk(KERN_INFO "Testing NX protection\n"); + + /* Test 1: check if the stack is not executable */ + if (test_address(&stackcode)) { + printk(KERN_ERR "test_nx: stack was executable\n"); + ret = -ENODEV; + } + + + /* Test 2: Check if the heap is executable */ + heap = kmalloc(64, GFP_KERNEL); + if (!heap) + return -ENOMEM; + heap[0] = 0xC3; /* opcode for "ret" */ + + if (test_address(heap)) { + printk(KERN_ERR "test_nx: heap was executable\n"); + ret = -ENODEV; + } + kfree(heap); + + /* + * The following 2 tests currently fail, this needs to get fixed + * Until then, don't run them to avoid too many people getting scared + * by the error message + */ +#if 0 + +#ifdef CONFIG_DEBUG_RODATA + /* Test 3: Check if the .rodata section is executable */ + if (rodata_test_data != 0xC3) { + printk(KERN_ERR "test_nx: .rodata marker has invalid value\n"); + ret = -ENODEV; + } else if (test_address(&rodata_test_data)) { + printk(KERN_ERR "test_nx: .rodata section is executable\n"); + ret = -ENODEV; + } +#endif + + /* Test 4: Check if the .data section of a module is executable */ + if (test_address(&test_data)) { + printk(KERN_ERR "test_nx: .data section is executable\n"); + ret = -ENODEV; + } + +#endif + return 0; +} + +static void test_exit(void) +{ +} + +module_init(test_NX); +module_exit(test_exit); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Testcase for the NX infrastructure"); +MODULE_AUTHOR("Arjan van de Ven "); diff --git a/arch/x86/kernel/test_rodata.c b/arch/x86/kernel/test_rodata.c new file mode 100644 index 00000000000..4c163772000 --- /dev/null +++ b/arch/x86/kernel/test_rodata.c @@ -0,0 +1,86 @@ +/* + * test_rodata.c: functional test for mark_rodata_ro function + * + * (C) Copyright 2008 Intel Corporation + * Author: Arjan van de Ven + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. 
+ */ +#include +#include +extern int rodata_test_data; + +int rodata_test(void) +{ + unsigned long result; + unsigned long start, end; + + /* test 1: read the value */ + /* If this test fails, some previous testrun has clobbered the state */ + if (!rodata_test_data) { + printk(KERN_ERR "rodata_test: test 1 fails (start data)\n"); + return -ENODEV; + } + + /* test 2: write to the variable; this should fault */ + /* + * If this test fails, we managed to overwrite the data + * + * This is written in assembly to be able to catch the + * exception that is supposed to happen in the correct + * case + */ + + result = 1; + asm volatile( + "0: mov %[zero],(%[rodata_test])\n" + " mov %[zero], %[rslt]\n" + "1:\n" + ".section .fixup,\"ax\"\n" + "2: jmp 1b\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + " .align 16\n" +#ifdef CONFIG_X86_32 + " .long 0b,2b\n" +#else + " .quad 0b,2b\n" +#endif + ".previous" + : [rslt] "=r" (result) + : [rodata_test] "r" (&rodata_test_data), [zero] "r" (0UL) + ); + + + if (!result) { + printk(KERN_ERR "rodata_test: test data was not read only\n"); + return -ENODEV; + } + + /* test 3: check the value hasn't changed */ + /* If this test fails, we managed to overwrite the data */ + if (!rodata_test_data) { + printk(KERN_ERR "rodata_test: Test 3 failes (end data)\n"); + return -ENODEV; + } + /* test 4: check if the rodata section is 4Kb aligned */ + start = (unsigned long)__start_rodata; + end = (unsigned long)__end_rodata; + if (start & (PAGE_SIZE - 1)) { + printk(KERN_ERR "rodata_test: .rodata is not 4k aligned\n"); + return -ENODEV; + } + if (end & (PAGE_SIZE - 1)) { + printk(KERN_ERR "rodata_test: .rodata end is not 4k aligned\n"); + return -ENODEV; + } + + return 0; +} + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Testcase for the DEBUG_RODATA infrastructure"); +MODULE_AUTHOR("Arjan van de Ven "); diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 4d115654519..a72737c0574 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -736,6 +736,8 @@ static int noinline do_test_wp_bit(void) } #ifdef CONFIG_DEBUG_RODATA +const int rodata_test_data = 0xC3; +EXPORT_SYMBOL_GPL(rodata_test_data); void mark_rodata_ro(void) { @@ -765,6 +767,7 @@ void mark_rodata_ro(void) set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); printk("Write protecting the kernel read-only data: %luk\n", size >> 10); + rodata_test(); #ifdef CONFIG_CPA_DEBUG printk("Testing CPA: undo %lx-%lx\n", start, start + size); diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index f97ace7a55e..50d29f5da02 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -573,6 +573,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) set_memory_rw(begin, (end - begin)/PAGE_SIZE); set_memory_np(begin, (end - begin)/PAGE_SIZE); set_memory_nx(begin, (end - begin)/PAGE_SIZE); + rodata_test(); } #endif } @@ -585,6 +586,8 @@ void free_initmem(void) } #ifdef CONFIG_DEBUG_RODATA +const int rodata_test_data = 0xC3; +EXPORT_SYMBOL_GPL(rodata_test_data); void mark_rodata_ro(void) { diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 76df771be58..f784d2f3414 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -184,6 +184,7 @@ VMLINUX_SYMBOL(__start___param) = .; \ *(__param) \ VMLINUX_SYMBOL(__stop___param) = .; \ + . = ALIGN((align)); \ VMLINUX_SYMBOL(__end_rodata) = .; \ } \ . 
= ALIGN((align)); diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h index 157da0206cc..3e74aff9080 100644 --- a/include/asm-x86/cacheflush.h +++ b/include/asm-x86/cacheflush.h @@ -47,5 +47,12 @@ void clflush_cache_range(void *addr, int size); #ifdef CONFIG_DEBUG_RODATA void mark_rodata_ro(void); #endif +#ifdef CONFIG_DEBUG_RODATA_TEST +void rodata_test(void); +#else +static inline void rodata_test(void) +{ +} +#endif #endif -- cgit v1.2.3-70-g09d2 From 62152d0ea7012382cd814c7b361b4ef2029f26e6 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 31 Jan 2008 22:05:48 +0100 Subject: asm-generic/tlb.h: build fix bring back the avr32, blackfin, sh, sparc architectures into working order, by reverting the effects of this change that came in via the x86 tree: commit a5a19c63f4e55e32dc0bc3d936d7f94793d8b380 Author: Jeremy Fitzhardinge Date: Wed Jan 30 13:33:39 2008 +0100 x86: demacro asm-x86/pgalloc_32.h Sorry about that! Signed-off-by: Ingo Molnar --- include/asm-generic/tlb.h | 1 + include/linux/swap.h | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) (limited to 'include/asm-generic') diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 6ce9f3ab928..75f2bfab614 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -15,6 +15,7 @@ #include #include +#include #include /* diff --git a/include/linux/swap.h b/include/linux/swap.h index 2c3ce4c69b2..4f3838adbb3 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -6,7 +6,6 @@ #include #include #include -#include #include #include -- cgit v1.2.3-70-g09d2