author | Ingo Molnar <mingo@elte.hu> | 2010-06-18 10:53:12 +0200
committer | Ingo Molnar <mingo@elte.hu> | 2010-06-18 10:53:19 +0200
commit | 646b1db4956ba8bf748b835b5eba211133d91c2e (patch)
tree | 061166d873d9da9cf83044a7593ad111787076c5 /include/asm-generic
parent | 0f2c3de2ba110626515234d5d584fb1b0c0749a2 (diff)
parent | 7e27d6e778cd87b6f2415515d7127eba53fe5d02 (diff)
Merge commit 'v2.6.35-rc3' into perf/core
Merge reason: Go from -rc1 base to -rc3 base, merge in fixes.
Diffstat (limited to 'include/asm-generic')
-rw-r--r-- | include/asm-generic/percpu.h | 10
-rw-r--r-- | include/asm-generic/vmlinux.lds.h | 38
2 files changed, 24 insertions, 24 deletions
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 04f91c2d3f7..b5043a9890d 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -80,7 +80,7 @@ extern void setup_per_cpu_areas(void);
 
 #ifndef PER_CPU_BASE_SECTION
 #ifdef CONFIG_SMP
-#define PER_CPU_BASE_SECTION ".data.percpu"
+#define PER_CPU_BASE_SECTION ".data..percpu"
 #else
 #define PER_CPU_BASE_SECTION ".data"
 #endif
@@ -92,15 +92,15 @@ extern void setup_per_cpu_areas(void);
 #define PER_CPU_SHARED_ALIGNED_SECTION ""
 #define PER_CPU_ALIGNED_SECTION ""
 #else
-#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
-#define PER_CPU_ALIGNED_SECTION ".shared_aligned"
+#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
+#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
 #endif
-#define PER_CPU_FIRST_SECTION ".first"
+#define PER_CPU_FIRST_SECTION "..first"
 
 #else
 
 #define PER_CPU_SHARED_ALIGNED_SECTION ""
-#define PER_CPU_ALIGNED_SECTION ".shared_aligned"
+#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
 #define PER_CPU_FIRST_SECTION ""
 
 #endif
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index ef779c6fc3d..48c5299cbf2 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -175,25 +175,25 @@
 #define NOSAVE_DATA \
         . = ALIGN(PAGE_SIZE); \
         VMLINUX_SYMBOL(__nosave_begin) = .; \
-        *(.data.nosave) \
+        *(.data..nosave) \
         . = ALIGN(PAGE_SIZE); \
         VMLINUX_SYMBOL(__nosave_end) = .;
 
 #define PAGE_ALIGNED_DATA(page_align) \
         . = ALIGN(page_align); \
-        *(.data.page_aligned)
+        *(.data..page_aligned)
 
 #define READ_MOSTLY_DATA(align) \
         . = ALIGN(align); \
-        *(.data.read_mostly)
+        *(.data..read_mostly)
 
 #define CACHELINE_ALIGNED_DATA(align) \
         . = ALIGN(align); \
-        *(.data.cacheline_aligned)
+        *(.data..cacheline_aligned)
 
 #define INIT_TASK_DATA(align) \
         . = ALIGN(align); \
-        *(.data.init_task)
+        *(.data..init_task)
 
 /*
  * Read only Data
@@ -435,7 +435,7 @@
  */
 #define INIT_TASK_DATA_SECTION(align) \
         . = ALIGN(align); \
-        .data.init_task : { \
+        .data..init_task : { \
                 INIT_TASK_DATA(align) \
         }
 
@@ -499,7 +499,7 @@
 #define BSS(bss_align) \
         . = ALIGN(bss_align); \
         .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
-                *(.bss.page_aligned) \
+                *(.bss..page_aligned) \
                 *(.dynbss) \
                 *(.bss) \
                 *(COMMON) \
@@ -666,16 +666,16 @@
  */
 #define PERCPU_VADDR(vaddr, phdr) \
         VMLINUX_SYMBOL(__per_cpu_load) = .; \
-        .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
+        .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
                                 - LOAD_OFFSET) { \
                 VMLINUX_SYMBOL(__per_cpu_start) = .; \
-                *(.data.percpu.first) \
-                *(.data.percpu.page_aligned) \
-                *(.data.percpu) \
-                *(.data.percpu.shared_aligned) \
+                *(.data..percpu..first) \
+                *(.data..percpu..page_aligned) \
+                *(.data..percpu) \
+                *(.data..percpu..shared_aligned) \
                 VMLINUX_SYMBOL(__per_cpu_end) = .; \
         } phdr \
-        . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
+        . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
 
 /**
  * PERCPU - define output section for percpu area, simple version
@@ -687,18 +687,18 @@
  *
  * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
  * that __per_cpu_load is defined as a relative symbol against
- * .data.percpu which is required for relocatable x86_32
+ * .data..percpu which is required for relocatable x86_32
  * configuration.
  */
 #define PERCPU(align) \
         . = ALIGN(align); \
-        .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \
+        .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
                 VMLINUX_SYMBOL(__per_cpu_load) = .; \
                 VMLINUX_SYMBOL(__per_cpu_start) = .; \
-                *(.data.percpu.first) \
-                *(.data.percpu.page_aligned) \
-                *(.data.percpu) \
-                *(.data.percpu.shared_aligned) \
+                *(.data..percpu..first) \
+                *(.data..percpu..page_aligned) \
+                *(.data..percpu) \
+                *(.data..percpu..shared_aligned) \
                 VMLINUX_SYMBOL(__per_cpu_end) = .; \
         }
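For context on how the renamed pieces fit together: below is a minimal sketch, not the kernel's actual DEFINE_PER_CPU plumbing, of how the string macros from percpu.h concatenate into the section names collected by the *(.data..percpu..shared_aligned) input-section patterns in vmlinux.lds.h. EXAMPLE_SECTION and my_counter are hypothetical names used only for illustration; the sketch assumes a GCC-style section attribute.

/* Hedged sketch: illustrates the section-name concatenation only.
 * EXAMPLE_SECTION and my_counter are made-up names, not kernel APIs. */
#define PER_CPU_BASE_SECTION            ".data..percpu"
#define PER_CPU_SHARED_ALIGNED_SECTION  "..shared_aligned"

/* Adjacent C string literals concatenate, so this expands to
 * __attribute__((section(".data..percpu..shared_aligned"))). */
#define EXAMPLE_SECTION(suffix) \
        __attribute__((section(PER_CPU_BASE_SECTION suffix)))

static unsigned long my_counter EXAMPLE_SECTION(PER_CPU_SHARED_ALIGNED_SECTION);

/* The linker rule *(.data..percpu..shared_aligned) above then places
 * my_counter inside the .data..percpu output section, between the
 * __per_cpu_start and __per_cpu_end symbols. */

The double-dot naming keeps these kernel-defined sections lexically distinct from the per-symbol sections gcc emits under -ffunction-sections/-fdata-sections (e.g. .data.foo for a variable named foo), which is the motivation behind the .data.percpu to .data..percpu rename series picked up by this merge.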