author		Mike Frysinger <vapier.adi@gmail.com>	2008-02-02 15:53:17 +0800
committer	Bryan Wu <bryan.wu@analog.com>	2008-02-02 15:53:17 +0800
commit		b7627acc432a36072253bb1288f56e78c7d9423e (patch)
tree		1d79f1c3a68c21ec1b51428a88d8f5149dbbbbf4 /arch/blackfin
parent		80f31c8a03d2f0644d0ceaf14e7e0108a007c962 (diff)
[Blackfin] arch: move the init sections to the end of memory
Move the init sections to the end of memory so that after they
are freed, run-time memory is all contiguous - this should help decrease
memory fragmentation.
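As a rough illustration (generic sketch, not the exact Blackfin code), freeing
the init area after boot typically follows the free_initmem() pattern below:
every page between __init_begin and __init_end is handed back to the page
allocator, so when that range sits at the very end of RAM the returned pages
border the rest of free memory instead of leaving a hole in the middle of it.

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <asm/sections.h>	/* __init_begin, __init_end */

/* Generic sketch of the arch free_initmem() style of this era; the real
 * Blackfin implementation may differ in detail.
 */
void free_initmem(void)
{
	unsigned long addr;

	/* Hand every now-unused init page back to the page allocator. */
	for (addr = (unsigned long)__init_begin;
	     addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
}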
When doing this, we also pack some of the other sections a little closer
together, to make sure we don't waste memory. To make this happen,
we need to rename the .data.init_task section to .init_task.data, so
it doesn't get picked up by the linker script glob.
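As a rough example of the glob issue (the ".data.demo" name below is invented
for illustration), an input section whose name starts with ".data." is swept
up by a *(.data.*) glob inside the output .data rule, while a name like
".init_task.data" is not matched and only lands where the linker script
explicitly writes *(.init_task.data), after the THREAD_SIZE alignment:

/* Illustration only: ".data.demo" is an invented section name that the
 * *(.data.*) glob in vmlinux.lds.S would match.
 */
int swallowed_by_glob __attribute__ ((__section__(".data.demo"))) = 1;

/* Not matched by *(.data.*); placed only by the explicit
 * *(.init_task.data) entry, which is why the patch renames the
 * init_task section.
 */
int placed_by_hand __attribute__ ((__section__(".init_task.data"))) = 2;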
Signed-off-by: Mike Frysinger <vapier.adi@gmail.com>
Signed-off-by: Bryan Wu <bryan.wu@analog.com>
Diffstat (limited to 'arch/blackfin')
-rw-r--r--	arch/blackfin/kernel/init_task.c	2
-rw-r--r--	arch/blackfin/kernel/setup.c	14
-rw-r--r--	arch/blackfin/kernel/vmlinux.lds.S	47
3 files changed, 36 insertions, 27 deletions
diff --git a/arch/blackfin/kernel/init_task.c b/arch/blackfin/kernel/init_task.c
index 673c860ffc2..c640154030e 100644
--- a/arch/blackfin/kernel/init_task.c
+++ b/arch/blackfin/kernel/init_task.c
@@ -57,5 +57,5 @@ EXPORT_SYMBOL(init_task);
  * "init_task" linker map entry.
  */
 union thread_union init_thread_union
-	__attribute__ ((__section__(".data.init_task"))) = {
+	__attribute__ ((__section__(".init_task.data"))) = {
 	INIT_THREAD_INFO(init_task)};
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index aca5e6e5bbd..06008078949 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -427,7 +427,7 @@ static __init void parse_cmdline_early(char *cmdline_p)
 static __init void memory_setup(void)
 {
 	_rambase = (unsigned long)_stext;
-	_ramstart = (unsigned long)__bss_stop;
+	_ramstart = (unsigned long)_end;
 
 	if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) {
 		console_init();
@@ -489,7 +489,7 @@ static __init void memory_setup(void)
 	}
 
 	/* Relocate MTD image to the top of memory after the uncached memory area */
-	dma_memcpy((char *)memory_end, __bss_stop, mtd_size);
+	dma_memcpy((char *)memory_end, _end, mtd_size);
 
 	memory_mtd_start = memory_end;
 	_ebss = memory_mtd_start;	/* define _ebss for compatible */
@@ -528,13 +528,13 @@ static __init void memory_setup(void)
 	printk(KERN_INFO "Board Memory: %ldMB\n", physical_mem_end >> 20);
 	printk(KERN_INFO "Kernel Managed Memory: %ldMB\n", _ramend >> 20);
 
-	printk( KERN_INFO "Memory map:\n"
+	printk(KERN_INFO "Memory map:\n"
 		KERN_INFO "  text      = 0x%p-0x%p\n"
 		KERN_INFO "  rodata    = 0x%p-0x%p\n"
+		KERN_INFO "  bss       = 0x%p-0x%p\n"
 		KERN_INFO "  data      = 0x%p-0x%p\n"
 		KERN_INFO "  stack     = 0x%p-0x%p\n"
 		KERN_INFO "  init      = 0x%p-0x%p\n"
-		KERN_INFO "  bss       = 0x%p-0x%p\n"
 		KERN_INFO "  available = 0x%p-0x%p\n"
 #ifdef CONFIG_MTD_UCLINUX
 		KERN_INFO "  rootfs    = 0x%p-0x%p\n"
@@ -544,12 +544,12 @@ static __init void memory_setup(void)
 #endif
 		, _stext, _etext,
 		__start_rodata, __end_rodata,
+		__bss_start, __bss_stop,
 		_sdata, _edata,
 		(void *)&init_thread_union,
 		(void *)((int)(&init_thread_union) + 0x2000),
-		__init_begin, __init_end,
-		__bss_start, __bss_stop,
-		(void *)_ramstart, (void *)memory_end
+		__init_begin, __init_end,
+		(void *)_ramstart, (void *)memory_end
 #ifdef CONFIG_MTD_UCLINUX
 		, (void *)memory_mtd_start, (void *)(memory_mtd_start + mtd_size)
 #endif
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 858722421b4..aed832540b3 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -41,6 +41,9 @@ _jiffies = _jiffies_64;
 SECTIONS
 {
 	. = CONFIG_BOOT_LOAD;
+	/* Neither the text, ro_data or bss section need to be aligned
+	 * So pack them back to back
+	 */
 	.text :
 	{
 		__text = .;
@@ -58,22 +61,25 @@ SECTIONS
 		*(__ex_table)
 		___stop___ex_table = .;
 
-		. = ALIGN(4);
 		__etext = .;
 	}
 
-	RO_DATA(PAGE_SIZE)
+	/* Just in case the first read only is a 32-bit access */
+	RO_DATA(4)
+
+	.bss :
+	{
+		. = ALIGN(4);
+		___bss_start = .;
+		*(.bss .bss.*)
+		*(COMMON)
+		___bss_stop = .;
+	}
 
 	.data :
 	{
-		/* make sure the init_task is aligned to the
-		 * kernel thread size so we can locate the kernel
-		 * stack properly and quickly.
-		 */
 		__sdata = .;
-		. = ALIGN(THREAD_SIZE);
-		*(.data.init_task)
-
+		/* This gets done first, so the glob doesn't suck it in */
 		. = ALIGN(32);
 		*(.data.cacheline_aligned)
 
@@ -81,10 +87,22 @@ SECTIONS
 		*(.data.*)
 		CONSTRUCTORS
 
+		/* make sure the init_task is aligned to the
+		 * kernel thread size so we can locate the kernel
+		 * stack properly and quickly.
+		 */
 		. = ALIGN(THREAD_SIZE);
+		*(.init_task.data)
+
 		__edata = .;
 	}
 
+	/* The init section should be last, so when we free it, it goes into
+	 * the general memory pool, and (hopefully) will decrease fragmentation
+	 * a tiny bit. The init section has a _requirement_ that it be
+	 * PAGE_SIZE aligned
+	 */
+	. = ALIGN(PAGE_SIZE);
 	___init_begin = .;
 
 	.init.text :
@@ -179,16 +197,7 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	___init_end = .;
 
-	.bss :
-	{
-		. = ALIGN(4);
-		___bss_start = .;
-		*(.bss .bss.*)
-		*(COMMON)
-		. = ALIGN(4);
-		___bss_stop = .;
-		__end = .;
-	}
+	__end = .;
 
 	STABS_DEBUG