author		Tony Luck <tony.luck@intel.com>	2007-11-06 15:14:45 -0800
committer	Tony Luck <tony.luck@intel.com>	2007-11-06 15:14:45 -0800
commit		4b9ddc7cf272a0af321c22ef9c00d76384402d48
tree		14b91c54bd8d77e8218ec5df84f5cc5cd469077e /arch/ia64/mm/contig.c
parent		4b07ae9b9d7b05a63e3ece32a666041949b7f421
[IA64] Fix section mismatch in contig.c version of per_cpu_init()
There is a section mismatch when building CONFIG_FLATMEM=y kernels
that also have CONFIG_HOTPLUG_CPU=y:
WARNING: vmlinux.o(.text+0x5a902): Section mismatch: reference to \
.init.text:__alloc_bootmem (between 'per_cpu_init' and 'count_pages')
The issue occurs because per_cpu_init() in mm/contig.c is
marked __cpuinit (which is #define'd to nothing in a CPU
hotplug configuration) and calls __alloc_bootmem() (which is
an __init function).  The usage is actually safe because the
__alloc_bootmem() call is inside an "if (first_time)" test,
so the call is only made while it is still legal to do so.
But the warning is irritating. Move the allocation to
find_memory().
Signed-off-by: Tony Luck <tony.luck@intel.com>
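
For illustration, a minimal compile-only sketch of the pattern modpost
complains about, under simplified assumptions: the two macros below
stand in for the real annotations in <linux/init.h>, and
fake_alloc_bootmem(), percpu_init_unfixed() and find_memory_like() are
invented names for this sketch, not kernel symbols.

/*
 * Compile-only sketch of the mismatch (illustrative stubs, not kernel code).
 * With CONFIG_HOTPLUG_CPU=y, __cpuinit expands to nothing, so the caller
 * stays in .text while the allocator stays in .init.text, and modpost
 * flags the cross-section reference even though the "if (first_time)"
 * guard makes it safe at run time.
 */
#define __init    __attribute__((__section__(".init.text")))
#define __cpuinit /* empty, as with CPU hotplug enabled */

static char bootmem_pool[4096];

static void * __init fake_alloc_bootmem(unsigned long size)
{
	(void)size;
	return bootmem_pool;		/* stand-in for __alloc_bootmem() */
}

/* Before the fix: .text code holding a reference into .init.text. */
static void * __cpuinit percpu_init_unfixed(void)
{
	static void *cpu_data;
	static int first_time = 1;

	if (first_time) {		/* safe, but invisible to modpost */
		first_time = 0;
		cpu_data = fake_alloc_bootmem(sizeof(bootmem_pool));
	}
	return cpu_data;
}

/* After the fix: the allocation lives entirely in a boot-time-only path. */
static void *cpu_data;

static void __init find_memory_like(void)
{
	cpu_data = fake_alloc_bootmem(sizeof(bootmem_pool));
}

The patch below follows the second shape: the __alloc_bootmem() call
moves into alloc_per_cpu_data(), which is only invoked from
find_memory() during boot, so no non-init code ever refers to
.init.text.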
Diffstat (limited to 'arch/ia64/mm/contig.c')
-rw-r--r--	arch/ia64/mm/contig.c	74
1 file changed, 41 insertions, 33 deletions
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index d3c538be466..7e9c275ea14 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -146,6 +146,46 @@ find_bootmap_location (unsigned long start, unsigned long end, void *arg)
 	return 0;
 }
 
+#ifdef CONFIG_SMP
+static void *cpu_data;
+/**
+ * per_cpu_init - setup per-cpu variables
+ *
+ * Allocate and setup per-cpu data areas.
+ */
+void * __cpuinit
+per_cpu_init (void)
+{
+	int cpu;
+	static int first_time=1;
+
+	/*
+	 * get_free_pages() cannot be used before cpu_init() done. BSP
+	 * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
+	 * get_zeroed_page().
+	 */
+	if (first_time) {
+		first_time=0;
+		for (cpu = 0; cpu < NR_CPUS; cpu++) {
+			memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
+			__per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
+			cpu_data += PERCPU_PAGE_SIZE;
+			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
+		}
+	}
+	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
+}
+
+static inline void
+alloc_per_cpu_data(void)
+{
+	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
+				   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+}
+#else
+#define alloc_per_cpu_data() do { } while (0)
+#endif /* CONFIG_SMP */
+
 /**
  * find_memory - setup memory map
  *
@@ -182,41 +222,9 @@ find_memory (void)
 
 	find_initrd();
 
+	alloc_per_cpu_data();
 }
 
-#ifdef CONFIG_SMP
-/**
- * per_cpu_init - setup per-cpu variables
- *
- * Allocate and setup per-cpu data areas.
- */
-void * __cpuinit
-per_cpu_init (void)
-{
-	void *cpu_data;
-	int cpu;
-	static int first_time=1;
-
-	/*
-	 * get_free_pages() cannot be used before cpu_init() done. BSP
-	 * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
-	 * get_zeroed_page().
-	 */
-	if (first_time) {
-		first_time=0;
-		cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
-					   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
-		for (cpu = 0; cpu < NR_CPUS; cpu++) {
-			memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
-			__per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
-			cpu_data += PERCPU_PAGE_SIZE;
-			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
-		}
-	}
-	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
-}
-#endif /* CONFIG_SMP */
-
 static int
 count_pages (u64 start, u64 end, void *arg)
 {