author     Ingo Molnar <mingo@elte.hu>    2009-03-10 10:16:17 +0100
committer  Ingo Molnar <mingo@elte.hu>    2009-03-10 10:17:48 +0100
commit     8293dd6f86e759068ce918aa10ca9c5d6d711cd0 (patch)
tree       de366d69078bf2b98c6765fa4ec1cc652f3d3173 /arch/x86/mm
parent     631595fbf4aeac260e664a8a002897e4db6a50dd (diff)
parent     467c88fee51e2ae862e9485245687da0730e29aa (diff)
Merge branch 'x86/core' into tracing/ftrace
Semantic merge: kernel/trace/trace_functions_graph.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/init.c       2
-rw-r--r--  arch/x86/mm/init_32.c   23
-rw-r--r--  arch/x86/mm/init_64.c    8
-rw-r--r--  arch/x86/mm/ioremap.c   21
-rw-r--r--  arch/x86/mm/kmmio.c     15
-rw-r--r--  arch/x86/mm/memtest.c    3

6 files changed, 43 insertions(+), 29 deletions(-)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 6d63e3d1253..15219e0d124 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -134,8 +134,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
{
unsigned long page_size_mask = 0;
unsigned long start_pfn, end_pfn;
+ unsigned long ret = 0;
unsigned long pos;
- unsigned long ret;
struct map_range mr[NR_RANGE_MR];
int nr_range, i;
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index d7f5060ab21..749559ed80f 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -806,11 +806,6 @@ static unsigned long __init setup_node_bootmem(int nodeid,
{
unsigned long bootmap_size;
- if (start_pfn > max_low_pfn)
- return bootmap;
- if (end_pfn > max_low_pfn)
- end_pfn = max_low_pfn;
-
/* don't touch min_low_pfn */
bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
bootmap >> PAGE_SHIFT,
@@ -843,13 +838,23 @@ void __init setup_bootmem_allocator(void)
max_pfn_mapped<<PAGE_SHIFT);
printk(KERN_INFO " low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);
+ for_each_online_node(nodeid) {
+ unsigned long start_pfn, end_pfn;
+
#ifdef CONFIG_NEED_MULTIPLE_NODES
- for_each_online_node(nodeid)
- bootmap = setup_node_bootmem(nodeid, node_start_pfn[nodeid],
- node_end_pfn[nodeid], bootmap);
+ start_pfn = node_start_pfn[nodeid];
+ end_pfn = node_end_pfn[nodeid];
+ if (start_pfn > max_low_pfn)
+ continue;
+ if (end_pfn > max_low_pfn)
+ end_pfn = max_low_pfn;
#else
- bootmap = setup_node_bootmem(0, 0, max_low_pfn, bootmap);
+ start_pfn = 0;
+ end_pfn = max_low_pfn;
#endif
+ bootmap = setup_node_bootmem(nodeid, start_pfn, end_pfn,
+ bootmap);
+ }
after_bootmem = 1;
}
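
Note: the init_32.c change above moves the max_low_pfn clamping out of setup_node_bootmem() and into a common per-node loop in setup_bootmem_allocator(), so the CONFIG_NEED_MULTIPLE_NODES and single-node cases share one loop body. A minimal standalone sketch of the clamping step follows; the variable names come from the diff, but the pfn values are made up for illustration.

#include <stdio.h>

/* Stand-ins for the kernel's per-node pfn ranges; values are made up. */
static unsigned long node_start_pfn[] = { 0x00000, 0x20000 };
static unsigned long node_end_pfn[]   = { 0x20000, 0x50000 };
static unsigned long max_low_pfn      = 0x38000;  /* top of lowmem */

int main(void)
{
	for (int nodeid = 0; nodeid < 2; nodeid++) {
		unsigned long start_pfn = node_start_pfn[nodeid];
		unsigned long end_pfn   = node_end_pfn[nodeid];

		if (start_pfn > max_low_pfn)
			continue;              /* node lies entirely above lowmem */
		if (end_pfn > max_low_pfn)
			end_pfn = max_low_pfn; /* clamp the node to lowmem */

		printf("node %d: bootmem covers pfns %#lx-%#lx\n",
		       nodeid, start_pfn, end_pfn);
	}
	return 0;
}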
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 66d6be85df8..1753e8020df 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -85,7 +85,7 @@ early_param("gbpages", parse_direct_gbpages_on);
pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
EXPORT_SYMBOL_GPL(__supported_pte_mask);
-static int do_not_nx __cpuinitdata;
+static int disable_nx __cpuinitdata;
/*
* noexec=on|off
@@ -100,9 +100,9 @@ static int __init nonx_setup(char *str)
return -EINVAL;
if (!strncmp(str, "on", 2)) {
__supported_pte_mask |= _PAGE_NX;
- do_not_nx = 0;
+ disable_nx = 0;
} else if (!strncmp(str, "off", 3)) {
- do_not_nx = 1;
+ disable_nx = 1;
__supported_pte_mask &= ~_PAGE_NX;
}
return 0;
@@ -114,7 +114,7 @@ void __cpuinit check_efer(void)
unsigned long efer;
rdmsrl(MSR_EFER, efer);
- if (!(efer & EFER_NX) || do_not_nx)
+ if (!(efer & EFER_NX) || disable_nx)
__supported_pte_mask &= ~_PAGE_NX;
}
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 62773abdf08..aca924a30ee 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -87,6 +87,8 @@ bool __virt_addr_valid(unsigned long x)
return false;
if (__vmalloc_start_set && is_vmalloc_addr((void *) x))
return false;
+ if (x >= FIXADDR_START)
+ return false;
return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);
@@ -504,13 +506,19 @@ static inline pte_t * __init early_ioremap_pte(unsigned long addr)
return &bm_pte[pte_index(addr)];
}
+static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;
+
void __init early_ioremap_init(void)
{
pmd_t *pmd;
+ int i;
if (early_ioremap_debug)
printk(KERN_INFO "early_ioremap_init()\n");
+ for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
+ slot_virt[i] = fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
+
pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
memset(bm_pte, 0, sizeof(bm_pte));
pmd_populate_kernel(&init_mm, pmd, bm_pte);
@@ -577,6 +585,7 @@ static inline void __init early_clear_fixmap(enum fixed_addresses idx)
static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
+
static int __init check_early_ioremap_leak(void)
{
int count = 0;
@@ -598,7 +607,8 @@ static int __init check_early_ioremap_leak(void)
}
late_initcall(check_early_ioremap_leak);
-static void __init __iomem *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
+static void __init __iomem *
+__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
{
unsigned long offset, last_addr;
unsigned int nrpages;
@@ -664,9 +674,9 @@ static void __init __iomem *__early_ioremap(unsigned long phys_addr, unsigned lo
--nrpages;
}
if (early_ioremap_debug)
- printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));
+ printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);
- prev_map[slot] = (void __iomem *)(offset + fix_to_virt(idx0));
+ prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
return prev_map[slot];
}
@@ -734,8 +744,3 @@ void __init early_iounmap(void __iomem *addr, unsigned long size)
}
prev_map[slot] = NULL;
}
-
-void __this_fixmap_does_not_exist(void)
-{
- WARN_ON(1);
-}
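
Note: the ioremap.c hunks above do two things: __virt_addr_valid() now also rejects addresses at or above FIXADDR_START, and __early_ioremap() uses per-slot virtual addresses precomputed into slot_virt[] by early_ioremap_init() instead of calling fix_to_virt(idx0) at mapping time, which also lets the __this_fixmap_does_not_exist() stub go away. A minimal standalone sketch of the precomputed-table idea follows; the constants and the fix_to_virt() stand-in are illustrative, not the kernel's values.

#include <stdio.h>

#define FIX_BTMAPS_SLOTS 4
#define NR_FIX_BTMAPS    64
#define FIX_BTMAP_BEGIN  1024            /* illustrative fixmap index */
#define FIXADDR_TOP      0xfffff000UL    /* illustrative top address */

/* Illustrative stand-in for the kernel's fix_to_virt(). */
static unsigned long fix_to_virt(unsigned int idx)
{
	return FIXADDR_TOP - ((unsigned long)idx << 12);
}

static unsigned long slot_virt[FIX_BTMAPS_SLOTS];

int main(void)
{
	/* Fill the table once at "init" time ... */
	for (int i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS * i);

	/* ... so later lookups are a plain array read, as in __early_ioremap(). */
	for (int slot = 0; slot < FIX_BTMAPS_SLOTS; slot++)
		printf("slot %d maps at %#lx\n", slot, slot_virt[slot]);
	return 0;
}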
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 9f205030d9a..6a518dd08a3 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -451,23 +451,24 @@ static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
static void remove_kmmio_fault_pages(struct rcu_head *head)
{
- struct kmmio_delayed_release *dr = container_of(
- head,
- struct kmmio_delayed_release,
- rcu);
+ struct kmmio_delayed_release *dr =
+ container_of(head, struct kmmio_delayed_release, rcu);
struct kmmio_fault_page *p = dr->release_list;
struct kmmio_fault_page **prevp = &dr->release_list;
unsigned long flags;
+
spin_lock_irqsave(&kmmio_lock, flags);
while (p) {
- if (!p->count)
+ if (!p->count) {
list_del_rcu(&p->list);
- else
+ prevp = &p->release_next;
+ } else {
*prevp = p->release_next;
- prevp = &p->release_next;
+ }
p = p->release_next;
}
spin_unlock_irqrestore(&kmmio_lock, flags);
+
/* This is the real RCU destroy call. */
call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
}
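
Note: the kmmio.c change above fixes the walk over the release list in remove_kmmio_fault_pages(): the prevp cursor is now only advanced past entries that stay linked, instead of being pointed into an entry that was just unlinked. A minimal standalone sketch of that pointer-to-pointer unlink pattern follows; the struct and field names are simplified stand-ins, and nothing is freed or RCU-deferred here.

#include <stdio.h>

/* Simplified stand-in for the release list walked above; only the
 * cursor handling is of interest. */
struct node {
	int count;
	struct node *next;
};

static void drop_busy_nodes(struct node **listp)
{
	struct node **prevp = listp;
	struct node *p = *listp;

	while (p) {
		if (!p->count) {
			/* Entry stays linked: advance the cursor past it. */
			prevp = &p->next;
		} else {
			/* Unlink the busy entry; prevp still points at the
			 * predecessor's next field, which now skips p. */
			*prevp = p->next;
		}
		p = p->next;
	}
}

int main(void)
{
	struct node n3 = { 0, NULL };
	struct node n2 = { 1, &n3 };   /* busy entry, gets unlinked */
	struct node n1 = { 0, &n2 };
	struct node *list = &n1;

	drop_busy_nodes(&list);
	for (struct node *p = list; p; p = p->next)
		printf("kept entry with count=%d\n", p->count);
	return 0;
}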
diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c
index 0bcd7883d03..605c8be0621 100644
--- a/arch/x86/mm/memtest.c
+++ b/arch/x86/mm/memtest.c
@@ -100,6 +100,9 @@ static int __init parse_memtest(char *arg)
{
if (arg)
memtest_pattern = simple_strtoul(arg, NULL, 0);
+ else
+ memtest_pattern = ARRAY_SIZE(patterns);
+
return 0;
}
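
Note: the memtest.c change above makes a bare "memtest" boot parameter (no "=N" value) default to running every entry in the patterns array via ARRAY_SIZE(patterns). A minimal standalone sketch of that parse-with-default behaviour follows; the pattern values are illustrative only.

#include <stdio.h>
#include <stdlib.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Illustrative pattern values, not the kernel's actual table. */
static const unsigned long patterns[] = { 0x00000000UL, 0xffffffffUL, 0x55555555UL };
static unsigned long memtest_pattern;

static int parse_memtest(const char *arg)
{
	if (arg)
		memtest_pattern = strtoul(arg, NULL, 0);   /* memtest=N */
	else
		memtest_pattern = ARRAY_SIZE(patterns);    /* bare "memtest" */
	return 0;
}

int main(void)
{
	parse_memtest(NULL);
	printf("bare memtest -> run %lu patterns\n", memtest_pattern);
	parse_memtest("1");
	printf("memtest=1    -> run %lu patterns\n", memtest_pattern);
	return 0;
}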