path: root/arch/avr32/kernel/setup.c
Diffstat (limited to 'arch/avr32/kernel/setup.c')
-rw-r--r--  arch/avr32/kernel/setup.c | 495
1 file changed, 232 insertions, 263 deletions
diff --git a/arch/avr32/kernel/setup.c b/arch/avr32/kernel/setup.c
index 53a1ff0cb05..d0a35a1b6a6 100644
--- a/arch/avr32/kernel/setup.c
+++ b/arch/avr32/kernel/setup.c
@@ -32,13 +32,6 @@
extern int root_mountflags;
/*
- * Bootloader-provided information about physical memory
- */
-struct tag_mem_range *mem_phys;
-struct tag_mem_range *mem_reserved;
-struct tag_mem_range *mem_ramdisk;
-
-/*
* Initialize loops_per_jiffy as 5000000 (500MIPS).
* Better make it too large than too small...
*/
@@ -50,32 +43,153 @@ EXPORT_SYMBOL(boot_cpu_data);
static char __initdata command_line[COMMAND_LINE_SIZE];
/*
- * Should be more than enough, but if you have a _really_ complex
- * setup, you might need to increase the size of this...
+ * Standard memory resources
*/
-static struct tag_mem_range __initdata mem_range_cache[32];
-static unsigned mem_range_next_free;
+static struct resource __initdata kernel_data = {
+ .name = "Kernel data",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_MEM,
+};
+static struct resource __initdata kernel_code = {
+ .name = "Kernel code",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_MEM,
+ .sibling = &kernel_data,
+};
/*
- * Standard memory resources
+ * Available system RAM and reserved regions as singly linked
+ * lists. These lists are traversed using the sibling pointer in
+ * struct resource and are kept sorted at all times.
*/
-static struct resource mem_res[] = {
- {
- .name = "Kernel code",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_MEM
- },
- {
- .name = "Kernel data",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_MEM,
- },
-};
+static struct resource *__initdata system_ram;
+static struct resource *__initdata reserved = &kernel_code;
+
+/*
+ * We need to allocate these before the bootmem allocator is up and
+ * running, so we need this "cache". 32 entries are probably enough
+ * for all but the most insanely complex systems.
+ */
+static struct resource __initdata res_cache[32];
+static unsigned int __initdata res_cache_next_free;
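The comment above explains the bootstrapping problem: these resource structures are needed before any allocator exists, so they are handed out from a fixed array. Purely for illustration, a minimal userspace sketch of that pattern (all names, sizes and addresses below are made up):

#include <stdio.h>

struct early_res {
	unsigned long start, end;
	const char *name;
	struct early_res *sibling;
};

/* Fixed pool used before a real allocator is available. */
static struct early_res pool[32];
static unsigned int pool_next_free;

static struct early_res *early_res_alloc(void)
{
	if (pool_next_free >= sizeof(pool) / sizeof(pool[0]))
		return NULL;	/* pool exhausted; caller must cope */
	return &pool[pool_next_free++];
}

int main(void)
{
	struct early_res *r = early_res_alloc();

	if (r) {
		r->start = 0x10000000;	/* hypothetical bank */
		r->end   = 0x11ffffff;
		r->name  = "System RAM";
		printf("%s: %08lx-%08lx\n", r->name, r->start, r->end);
	}
	return 0;
}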
+
+static void __init resource_init(void)
+{
+ struct resource *mem, *res;
+ struct resource *new;
+
+ kernel_code.start = __pa(init_mm.start_code);
+
+ for (mem = system_ram; mem; mem = mem->sibling) {
+ new = alloc_bootmem_low(sizeof(struct resource));
+ memcpy(new, mem, sizeof(struct resource));
+
+ new->sibling = NULL;
+ if (request_resource(&iomem_resource, new))
+ printk(KERN_WARNING "Bad RAM resource %08x-%08x\n",
+ mem->start, mem->end);
+ }
+
+ for (res = reserved; res; res = res->sibling) {
+ new = alloc_bootmem_low(sizeof(struct resource));
+ memcpy(new, res, sizeof(struct resource));
+
+ new->sibling = NULL;
+ if (insert_resource(&iomem_resource, new))
+ printk(KERN_WARNING
+ "Bad reserved resource %s (%08x-%08x)\n",
+ res->name, res->start, res->end);
+ }
+}
+
+static void __init
+add_physical_memory(resource_size_t start, resource_size_t end)
+{
+ struct resource *new, *next, **pprev;
+
+ for (pprev = &system_ram, next = system_ram; next;
+ pprev = &next->sibling, next = next->sibling) {
+ if (end < next->start)
+ break;
+ if (start <= next->end) {
+ printk(KERN_WARNING
+ "Warning: Physical memory map is broken\n");
+ printk(KERN_WARNING
+ "Warning: %08x-%08x overlaps %08x-%08x\n",
+ start, end, next->start, next->end);
+ return;
+ }
+ }
+
+ if (res_cache_next_free >= ARRAY_SIZE(res_cache)) {
+ printk(KERN_WARNING
+ "Warning: Failed to add physical memory %08x-%08x\n",
+ start, end);
+ return;
+ }
+
+ new = &res_cache[res_cache_next_free++];
+ new->start = start;
+ new->end = end;
+ new->name = "System RAM";
+ new->sibling = next;
+ new->flags = IORESOURCE_MEM;
+
+ *pprev = new;
+}
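add_physical_memory() keeps system_ram sorted by walking a pointer to the link field (pprev), so inserting at the head needs no special case, and the same walk doubles as the overlap check before a cache entry is consumed. A standalone sketch of that idiom, using a made-up node type and addresses:

#include <stdio.h>

struct range {
	unsigned long start, end;
	struct range *sibling;
};

/* Insert 'new' into a list kept sorted by ascending start address.
 * Returns 0 on success, -1 if 'new' overlaps an existing entry. */
static int insert_sorted(struct range **head, struct range *new)
{
	struct range **pprev, *next;

	for (pprev = head, next = *head; next;
	     pprev = &next->sibling, next = next->sibling) {
		if (new->end < next->start)
			break;			/* slot found */
		if (new->start <= next->end)
			return -1;		/* overlap */
	}

	new->sibling = next;
	*pprev = new;
	return 0;
}

int main(void)
{
	/* Hypothetical banks, deliberately added out of order. */
	struct range b1 = { 0x20000000, 0x20ffffff, NULL };
	struct range b0 = { 0x10000000, 0x11ffffff, NULL };
	struct range *head = NULL, *r;

	insert_sorted(&head, &b1);
	insert_sorted(&head, &b0);

	for (r = head; r; r = r->sibling)
		printf("%08lx-%08lx\n", r->start, r->end);
	return 0;
}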
+
+static int __init
+add_reserved_region(resource_size_t start, resource_size_t end,
+ const char *name)
+{
+ struct resource *new, *next, **pprev;
-#define kernel_code mem_res[0]
-#define kernel_data mem_res[1]
+ if (end < start)
+ return -EINVAL;
+
+ if (res_cache_next_free >= ARRAY_SIZE(res_cache))
+ return -ENOMEM;
+
+ for (pprev = &reserved, next = reserved; next;
+ pprev = &next->sibling, next = next->sibling) {
+ if (end < next->start)
+ break;
+ if (start <= next->end)
+ return -EBUSY;
+ }
+
+ new = &res_cache[res_cache_next_free++];
+ new->start = start;
+ new->end = end;
+ new->name = name;
+ new->sibling = next;
+ new->flags = IORESOURCE_MEM;
+
+ *pprev = new;
+
+ return 0;
+}
+
+static unsigned long __init
+find_free_region(const struct resource *mem, resource_size_t size,
+ resource_size_t align)
+{
+ struct resource *res;
+ unsigned long target;
+
+ target = ALIGN(mem->start, align);
+ for (res = reserved; res; res = res->sibling) {
+ if ((target + size) <= res->start)
+ break;
+ if (target <= res->end)
+ target = ALIGN(res->end + 1, align);
+ }
+
+ if ((target + size) > (mem->end + 1))
+ return mem->end + 1;
+
+ return target;
+}
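find_free_region() relies on the reserved list being sorted: it bumps the candidate address past every conflicting entry and stops at the first gap that is large enough. The same logic in a self-contained form, with a made-up layout (kernel image at the bottom of a 32 MiB bank):

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

struct region {
	unsigned long start, end;	/* inclusive bounds */
	const struct region *sibling;
};

/* Return the first 'size'-byte gap in 'mem' that avoids every entry in
 * the sorted 'reserved' list, or mem->end + 1 if there is none. */
static unsigned long find_gap(const struct region *mem,
			      const struct region *reserved,
			      unsigned long size, unsigned long align)
{
	const struct region *res;
	unsigned long target = ALIGN(mem->start, align);

	for (res = reserved; res; res = res->sibling) {
		if (target + size <= res->start)
			break;				/* gap before res */
		if (target <= res->end)
			target = ALIGN(res->end + 1, align);
	}

	if (target + size > mem->end + 1)
		return mem->end + 1;			/* no room */
	return target;
}

int main(void)
{
	/* Hypothetical layout: 32 MiB of RAM, kernel reserved at the bottom. */
	struct region kernel = { 0x10000000, 0x1017ffff, NULL };
	struct region ram    = { 0x10000000, 0x11ffffff, NULL };

	printf("free space starts at %08lx\n",
	       find_gap(&ram, &kernel, 0x1000, 0x1000));	/* 10180000 */
	return 0;
}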
/*
* Early framebuffer allocation. Works as follows:
@@ -112,42 +226,6 @@ static int __init early_parse_fbmem(char *p)
}
early_param("fbmem", early_parse_fbmem);
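Only the tail of early_parse_fbmem() is visible in this hunk. The option takes a size with an optional '@address' suffix on the kernel command line. The sketch below shows that style of parsing in plain userspace C; it is not the code from this file, and a kernel version would normally lean on the memparse() helper instead of strtoul():

#include <stdio.h>
#include <stdlib.h>

/* Parse "size[@address]", e.g. "600k@0x11e00000" (illustrative only). */
static void parse_fbmem_arg(const char *p,
			    unsigned long *size, unsigned long *start)
{
	char *end;

	*size = strtoul(p, &end, 0);
	if (*end == 'k' || *end == 'K') {
		*size <<= 10;
		end++;
	} else if (*end == 'm' || *end == 'M') {
		*size <<= 20;
		end++;
	}

	*start = 0;		/* 0 means: let the kernel pick a location */
	if (*end == '@')
		*start = strtoul(end + 1, &end, 0);
}

int main(void)
{
	unsigned long size, start;

	parse_fbmem_arg("600k@0x11e00000", &size, &start);
	printf("fbmem: %lu bytes at %08lx\n", size, start);
	return 0;
}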
-static inline void __init resource_init(void)
-{
- struct tag_mem_range *region;
-
- kernel_code.start = __pa(init_mm.start_code);
- kernel_code.end = __pa(init_mm.end_code - 1);
- kernel_data.start = __pa(init_mm.end_code);
- kernel_data.end = __pa(init_mm.brk - 1);
-
- for (region = mem_phys; region; region = region->next) {
- struct resource *res;
- unsigned long phys_start, phys_end;
-
- if (region->size == 0)
- continue;
-
- phys_start = region->addr;
- phys_end = phys_start + region->size - 1;
-
- res = alloc_bootmem_low(sizeof(*res));
- res->name = "System RAM";
- res->start = phys_start;
- res->end = phys_end;
- res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-
- request_resource (&iomem_resource, res);
-
- if (kernel_code.start >= res->start &&
- kernel_code.end <= res->end)
- request_resource (res, &kernel_code);
- if (kernel_data.start >= res->start &&
- kernel_data.end <= res->end)
- request_resource (res, &kernel_data);
- }
-}
-
static int __init parse_tag_core(struct tag *tag)
{
if (tag->hdr.size > 2) {
@@ -159,11 +237,9 @@ static int __init parse_tag_core(struct tag *tag)
}
__tagtable(ATAG_CORE, parse_tag_core);
-static int __init parse_tag_mem_range(struct tag *tag,
- struct tag_mem_range **root)
+static int __init parse_tag_mem(struct tag *tag)
{
- struct tag_mem_range *cur, **pprev;
- struct tag_mem_range *new;
+ unsigned long start, end;
/*
* Ignore zero-sized entries. If we're running standalone, the
@@ -173,34 +249,53 @@ static int __init parse_tag_mem_range(struct tag *tag,
if (tag->u.mem_range.size == 0)
return 0;
- /*
- * Copy the data so the bootmem init code doesn't need to care
- * about it.
- */
- if (mem_range_next_free >= ARRAY_SIZE(mem_range_cache))
- panic("Physical memory map too complex!\n");
+ start = tag->u.mem_range.addr;
+ end = tag->u.mem_range.addr + tag->u.mem_range.size - 1;
- new = &mem_range_cache[mem_range_next_free++];
- *new = tag->u.mem_range;
+ add_physical_memory(start, end);
+ return 0;
+}
+__tagtable(ATAG_MEM, parse_tag_mem);
- pprev = root;
- cur = *root;
- while (cur) {
- pprev = &cur->next;
- cur = cur->next;
+static int __init parse_tag_rdimg(struct tag *tag)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+ struct tag_mem_range *mem = &tag->u.mem_range;
+ int ret;
+
+ if (initrd_start) {
+ printk(KERN_WARNING
+ "Warning: Only the first initrd image will be used\n");
+ return 0;
}
- *pprev = new;
- new->next = NULL;
+ ret = add_reserved_region(mem->addr, mem->addr + mem->size - 1,
+ "initrd");
+ if (ret) {
+ printk(KERN_WARNING
+ "Warning: Failed to reserve initrd memory\n");
+ return ret;
+ }
+
+ initrd_start = (unsigned long)__va(mem->addr);
+ initrd_end = initrd_start + mem->size;
+#else
+ printk(KERN_WARNING "RAM disk image present, but "
+ "no initrd support in kernel, ignoring\n");
+#endif
return 0;
}
+__tagtable(ATAG_RDIMG, parse_tag_rdimg);
-static int __init parse_tag_mem(struct tag *tag)
+static int __init parse_tag_rsvd_mem(struct tag *tag)
{
- return parse_tag_mem_range(tag, &mem_phys);
+ struct tag_mem_range *mem = &tag->u.mem_range;
+
+ return add_reserved_region(mem->addr, mem->addr + mem->size - 1,
+ "Reserved");
}
-__tagtable(ATAG_MEM, parse_tag_mem);
+__tagtable(ATAG_RSVD_MEM, parse_tag_rsvd_mem);
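Each __tagtable() registration above adds an entry to a table that the linker gathers into its own section; at boot, parse_tags() walks that table and dispatches each bootloader tag to its handler. A rough, self-contained model of that dispatch (a plain array stands in for the linker-built section, and the tag values are placeholders):

#include <stdio.h>

#define ATAG_MEM	0x54410002u	/* placeholder tag values */
#define ATAG_CMDLINE	0x54410003u

struct tag {
	unsigned int tag;
	/* real tags also carry a size and a union of payloads */
};

struct tagtable {
	unsigned int tag;
	int (*parse)(struct tag *);
};

static int parse_mem(struct tag *t)     { (void)t; puts("memory tag");  return 0; }
static int parse_cmdline(struct tag *t) { (void)t; puts("cmdline tag"); return 0; }

/* In the kernel this table is assembled by the linker from all the
 * __tagtable() declarations; here it is just a plain array. */
static const struct tagtable tagtable[] = {
	{ ATAG_MEM,     parse_mem },
	{ ATAG_CMDLINE, parse_cmdline },
};

static int parse_one_tag(struct tag *t)
{
	unsigned int i;

	for (i = 0; i < sizeof(tagtable) / sizeof(tagtable[0]); i++)
		if (tagtable[i].tag == t->tag)
			return tagtable[i].parse(t);

	printf("Ignoring unrecognised tag 0x%08x\n", t->tag);
	return -1;
}

int main(void)
{
	struct tag t = { ATAG_MEM };

	parse_one_tag(&t);
	return 0;
}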
static int __init parse_tag_cmdline(struct tag *tag)
{
@@ -209,12 +304,6 @@ static int __init parse_tag_cmdline(struct tag *tag)
}
__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
-static int __init parse_tag_rdimg(struct tag *tag)
-{
- return parse_tag_mem_range(tag, &mem_ramdisk);
-}
-__tagtable(ATAG_RDIMG, parse_tag_rdimg);
-
static int __init parse_tag_clock(struct tag *tag)
{
/*
@@ -225,12 +314,6 @@ static int __init parse_tag_clock(struct tag *tag)
}
__tagtable(ATAG_CLOCK, parse_tag_clock);
-static int __init parse_tag_rsvd_mem(struct tag *tag)
-{
- return parse_tag_mem_range(tag, &mem_reserved);
-}
-__tagtable(ATAG_RSVD_MEM, parse_tag_rsvd_mem);
-
/*
* Scan the tag table for this tag, and call its parse function. The
* tag table is built by the linker from all the __tagtable
@@ -262,66 +345,16 @@ static void __init parse_tags(struct tag *t)
t->hdr.tag);
}
-static void __init print_memory_map(const char *what,
- struct tag_mem_range *mem)
-{
- printk ("%s:\n", what);
- for (; mem; mem = mem->next) {
- printk (" %08lx - %08lx\n",
- (unsigned long)mem->addr,
- (unsigned long)(mem->addr + mem->size));
- }
-}
-
-#define MAX_LOWMEM HIGHMEM_START
-#define MAX_LOWMEM_PFN PFN_DOWN(MAX_LOWMEM)
-
-/*
- * Sort a list of memory regions in-place by ascending address.
- *
- * We're using bubble sort because we only have singly linked lists
- * with few elements.
- */
-static void __init sort_mem_list(struct tag_mem_range **pmem)
-{
- int done;
- struct tag_mem_range **a, **b;
-
- if (!*pmem)
- return;
-
- do {
- done = 1;
- a = pmem, b = &(*pmem)->next;
- while (*b) {
- if ((*a)->addr > (*b)->addr) {
- struct tag_mem_range *tmp;
- tmp = (*b)->next;
- (*b)->next = *a;
- *a = *b;
- *b = tmp;
- done = 0;
- }
- a = &(*a)->next;
- b = &(*a)->next;
- }
- } while (!done);
-}
-
/*
* Find a free memory region large enough for storing the
* bootmem bitmap.
*/
static unsigned long __init
-find_bootmap_pfn(const struct tag_mem_range *mem)
+find_bootmap_pfn(const struct resource *mem)
{
unsigned long bootmap_pages, bootmap_len;
- unsigned long node_pages = PFN_UP(mem->size);
- unsigned long bootmap_addr = mem->addr;
- struct tag_mem_range *reserved = mem_reserved;
- struct tag_mem_range *ramdisk = mem_ramdisk;
- unsigned long kern_start = __pa(_stext);
- unsigned long kern_end = __pa(_end);
+ unsigned long node_pages = PFN_UP(mem->end - mem->start + 1);
+ unsigned long bootmap_start;
bootmap_pages = bootmem_bootmap_pages(node_pages);
bootmap_len = bootmap_pages << PAGE_SHIFT;
@@ -331,87 +364,43 @@ find_bootmap_pfn(const struct tag_mem_range *mem)
* storing the bootmem bitmap. We can take advantage of the
* fact that all lists have been sorted.
*
- * We have to check explicitly reserved regions as well as the
- * kernel image and any RAMDISK images...
- *
- * Oh, and we have to make sure we don't overwrite the taglist
- * since we're going to use it until the bootmem allocator is
- * fully up and running.
+ * We have to check that we don't collide with any reserved
+ * regions, which includes the kernel image and any RAMDISK
+ * images.
*/
- while (1) {
- if ((bootmap_addr < kern_end) &&
- ((bootmap_addr + bootmap_len) > kern_start))
- bootmap_addr = kern_end;
-
- while (reserved &&
- (bootmap_addr >= (reserved->addr + reserved->size)))
- reserved = reserved->next;
-
- if (reserved &&
- ((bootmap_addr + bootmap_len) >= reserved->addr)) {
- bootmap_addr = reserved->addr + reserved->size;
- continue;
- }
-
- while (ramdisk &&
- (bootmap_addr >= (ramdisk->addr + ramdisk->size)))
- ramdisk = ramdisk->next;
-
- if (!ramdisk ||
- ((bootmap_addr + bootmap_len) < ramdisk->addr))
- break;
-
- bootmap_addr = ramdisk->addr + ramdisk->size;
- }
-
- if ((PFN_UP(bootmap_addr) + bootmap_len) >= (mem->addr + mem->size))
- return ~0UL;
+ bootmap_start = find_free_region(mem, bootmap_len, PAGE_SIZE);
- return PFN_UP(bootmap_addr);
+ return bootmap_start >> PAGE_SHIFT;
}
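bootmem_bootmap_pages() sizes the node's bitmap at one bit per page, rounded up to whole pages, and find_free_region() then places it. A worked example of that arithmetic for a hypothetical 32 MiB bank with 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* One bit per page, rounded up to whole bytes, then to whole pages. */
static unsigned long bootmap_pages(unsigned long pages)
{
	unsigned long bytes = (pages + 7) / 8;

	return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
	unsigned long node_pages = (32UL << 20) >> PAGE_SHIFT;	/* 8192 pages */
	unsigned long pages = bootmap_pages(node_pages);

	/* 8192 pages -> 1024 bytes of bitmap -> a single 4 KiB page */
	printf("bitmap: %lu page(s), %lu bytes\n", pages, pages << PAGE_SHIFT);
	return 0;
}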
+#define MAX_LOWMEM HIGHMEM_START
+#define MAX_LOWMEM_PFN PFN_DOWN(MAX_LOWMEM)
+
static void __init setup_bootmem(void)
{
unsigned bootmap_size;
unsigned long first_pfn, bootmap_pfn, pages;
unsigned long max_pfn, max_low_pfn;
- unsigned long kern_start = __pa(_stext);
- unsigned long kern_end = __pa(_end);
unsigned node = 0;
- struct tag_mem_range *bank, *res;
+ struct resource *res;
- sort_mem_list(&mem_phys);
- sort_mem_list(&mem_reserved);
-
- print_memory_map("Physical memory", mem_phys);
- print_memory_map("Reserved memory", mem_reserved);
+ printk(KERN_INFO "Physical memory:\n");
+ for (res = system_ram; res; res = res->sibling)
+ printk(" %08x-%08x\n", res->start, res->end);
+ printk(KERN_INFO "Reserved memory:\n");
+ for (res = reserved; res; res = res->sibling)
+ printk(" %08x-%08x: %s\n",
+ res->start, res->end, res->name);
nodes_clear(node_online_map);
- if (mem_ramdisk) {
-#ifdef CONFIG_BLK_DEV_INITRD
- initrd_start = (unsigned long)__va(mem_ramdisk->addr);
- initrd_end = initrd_start + mem_ramdisk->size;
-
- print_memory_map("RAMDISK images", mem_ramdisk);
- if (mem_ramdisk->next)
- printk(KERN_WARNING
- "Warning: Only the first RAMDISK image "
- "will be used\n");
- sort_mem_list(&mem_ramdisk);
-#else
- printk(KERN_WARNING "RAM disk image present, but "
- "no initrd support in kernel!\n");
-#endif
- }
-
- if (mem_phys->next)
+ if (system_ram->sibling)
printk(KERN_WARNING "Only using first memory bank\n");
- for (bank = mem_phys; bank; bank = NULL) {
- first_pfn = PFN_UP(bank->addr);
- max_low_pfn = max_pfn = PFN_DOWN(bank->addr + bank->size);
- bootmap_pfn = find_bootmap_pfn(bank);
+ for (res = system_ram; res; res = NULL) {
+ first_pfn = PFN_UP(res->start);
+ max_low_pfn = max_pfn = PFN_DOWN(res->end + 1);
+ bootmap_pfn = find_bootmap_pfn(res);
if (bootmap_pfn > max_pfn)
panic("No space for bootmem bitmap!\n");
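The PFN conversions used here are plain shifts by PAGE_SHIFT: PFN_UP rounds an address up to the next page frame number, PFN_DOWN rounds it down, and PFN_PHYS goes back to a byte address. A worked example with made-up bank boundaries (4 KiB pages assumed):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PFN_UP(x)	(((x) + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)	((x) << PAGE_SHIFT)

int main(void)
{
	/* Hypothetical bank: 32 MiB starting at 0x10000000. */
	unsigned long start = 0x10000000, end = 0x11ffffff;

	/* First whole page and the page just past the end of the bank. */
	printf("first_pfn = 0x%lx\n", PFN_UP(start));
	printf("max_pfn   = 0x%lx\n", PFN_DOWN(end + 1));
	printf("back to a physical address: 0x%lx\n", PFN_PHYS(PFN_UP(start)));
	return 0;
}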
@@ -435,10 +424,6 @@ static void __init setup_bootmem(void)
bootmap_size = init_bootmem_node(NODE_DATA(node), bootmap_pfn,
first_pfn, max_low_pfn);
- printk("Node %u: bdata = %p, bdata->node_bootmem_map = %p\n",
- node, NODE_DATA(node)->bdata,
- NODE_DATA(node)->bdata->node_bootmem_map);
-
/*
* Register fully available RAM pages with the bootmem
* allocator.
@@ -447,51 +432,26 @@ static void __init setup_bootmem(void)
free_bootmem_node (NODE_DATA(node), PFN_PHYS(first_pfn),
PFN_PHYS(pages));
- /*
- * Reserve space for the kernel image (if present in
- * this node)...
- */
- if ((kern_start >= PFN_PHYS(first_pfn)) &&
- (kern_start < PFN_PHYS(max_pfn))) {
- printk("Node %u: Kernel image %08lx - %08lx\n",
- node, kern_start, kern_end);
- reserve_bootmem_node(NODE_DATA(node), kern_start,
- kern_end - kern_start);
- }
-
- /* ...the bootmem bitmap... */
+ /* Reserve space for the bootmem bitmap... */
reserve_bootmem_node(NODE_DATA(node),
PFN_PHYS(bootmap_pfn),
bootmap_size);
- /* ...any RAMDISK images... */
- for (res = mem_ramdisk; res; res = res->next) {
- if (res->addr > PFN_PHYS(max_pfn))
- break;
-
- if (res->addr >= PFN_PHYS(first_pfn)) {
- printk("Node %u: RAMDISK %08lx - %08lx\n",
- node,
- (unsigned long)res->addr,
- (unsigned long)(res->addr + res->size));
- reserve_bootmem_node(NODE_DATA(node),
- res->addr, res->size);
- }
- }
-
/* ...and any other reserved regions. */
- for (res = mem_reserved; res; res = res->next) {
- if (res->addr > PFN_PHYS(max_pfn))
+ for (res = reserved; res; res = res->sibling) {
+ if (res->start > PFN_PHYS(max_pfn))
break;
- if (res->addr >= PFN_PHYS(first_pfn)) {
- printk("Node %u: Reserved %08lx - %08lx\n",
- node,
- (unsigned long)res->addr,
- (unsigned long)(res->addr + res->size));
- reserve_bootmem_node(NODE_DATA(node),
- res->addr, res->size);
- }
+ /*
+ * resource_init will complain about partial
+ * overlaps, so we'll just ignore such
+ * resources for now.
+ */
+ if (res->start >= PFN_PHYS(first_pfn)
+ && res->end < PFN_PHYS(max_pfn))
+ reserve_bootmem_node(
+ NODE_DATA(node), res->start,
+ res->end - res->start + 1);
}
node_set_online(node);
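As the comment notes, a reserved region is passed to reserve_bootmem_node() only if it lies completely inside the node; partial overlaps are left for resource_init() to complain about. The containment test reduces to the following check, shown with made-up numbers:

#include <stdio.h>

/* Region [start, end] (inclusive) is reserved only if it sits entirely
 * inside the node's physical span [node_start, node_end_excl). */
static int fully_inside(unsigned long start, unsigned long end,
			unsigned long node_start, unsigned long node_end_excl)
{
	return start >= node_start && end < node_end_excl;
}

int main(void)
{
	/* Hypothetical node spanning 0x10000000-0x12000000 (exclusive end). */
	unsigned long node_start = 0x10000000, node_end = 0x12000000;

	printf("%d\n", fully_inside(0x10100000, 0x101fffff, node_start, node_end)); /* 1 */
	printf("%d\n", fully_inside(0x11f00000, 0x120fffff, node_start, node_end)); /* 0: partial */
	return 0;
}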
@@ -502,6 +462,20 @@ void __init setup_arch (char **cmdline_p)
{
struct clk *cpu_clk;
+ init_mm.start_code = (unsigned long)_text;
+ init_mm.end_code = (unsigned long)_etext;
+ init_mm.end_data = (unsigned long)_edata;
+ init_mm.brk = (unsigned long)_end;
+
+ /*
+ * Include .init section to make allocations easier. It will
+ * be removed before the resource is actually requested.
+ */
+ kernel_code.start = __pa(__init_begin);
+ kernel_code.end = __pa(init_mm.end_code - 1);
+ kernel_data.start = __pa(init_mm.end_code);
+ kernel_data.end = __pa(init_mm.brk - 1);
+
parse_tags(bootloader_tags);
setup_processor();
@@ -527,11 +501,6 @@ void __init setup_arch (char **cmdline_p)
((cpu_hz + 500) / 1000) % 1000);
}
- init_mm.start_code = (unsigned long) &_text;
- init_mm.end_code = (unsigned long) &_etext;
- init_mm.end_data = (unsigned long) &_edata;
- init_mm.brk = (unsigned long) &_end;
-
strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
parse_early_param();
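setup_arch() converts the linker symbols to physical addresses with __pa() before the tag handlers run. On AVR32 the kernel's linear mapping makes __pa()/__va() simple offset arithmetic; the sketch below assumes a 0x80000000 base purely for illustration, so check the architecture headers rather than relying on that constant:

#include <stdio.h>

/* Assumed linear-mapping base, taken as 0x80000000 for illustration only. */
#define PAGE_OFFSET	0x80000000UL

#define __pa(vaddr)	((unsigned long)(vaddr) - PAGE_OFFSET)
#define __va(paddr)	((void *)((unsigned long)(paddr) + PAGE_OFFSET))

int main(void)
{
	unsigned long text = 0x90000000UL;	/* hypothetical _text address */

	printf("__pa(0x%08lx) = 0x%08lx\n", text, __pa(text));
	printf("__va(0x%08lx) = %p\n", __pa(text), __va(__pa(text)));
	return 0;
}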