Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/boot/a20.c             | 75
-rw-r--r--  arch/x86/include/asm/page.h     |  1
-rw-r--r--  arch/x86/include/asm/paravirt.h | 17
-rw-r--r--  arch/x86/kernel/hpet.c          |  2
-rw-r--r--  arch/x86/kernel/paravirt.c      | 26
-rw-r--r--  arch/x86/kernel/ptrace.c        | 16
-rw-r--r--  arch/x86/mm/ioremap.c           | 19
-rw-r--r--  arch/x86/mm/pageattr.c          | 15
-rw-r--r--  arch/x86/mm/pat.c               | 83
9 files changed, 140 insertions(+), 114 deletions(-)
diff --git a/arch/x86/boot/a20.c b/arch/x86/boot/a20.c
index 4063d630def..fba8e9c6a50 100644
--- a/arch/x86/boot/a20.c
+++ b/arch/x86/boot/a20.c
@@ -2,6 +2,7 @@
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright 2007-2008 rPath, Inc. - All Rights Reserved
+ * Copyright 2009 Intel Corporation
*
* This file is part of the Linux kernel, and is made available under
* the terms of the GNU General Public License version 2.
@@ -15,16 +16,23 @@
#include "boot.h"
#define MAX_8042_LOOPS 100000
+#define MAX_8042_FF 32
static int empty_8042(void)
{
u8 status;
int loops = MAX_8042_LOOPS;
+ int ffs = MAX_8042_FF;
while (loops--) {
io_delay();
status = inb(0x64);
+ if (status == 0xff) {
+ /* FF is a plausible, but very unlikely status */
+ if (!--ffs)
+ return -1; /* Assume no KBC present */
+ }
if (status & 1) {
/* Read and discard input data */
io_delay();
@@ -118,44 +126,43 @@ static void enable_a20_fast(void)
int enable_a20(void)
{
-#if defined(CONFIG_X86_ELAN)
- /* Elan croaks if we try to touch the KBC */
- enable_a20_fast();
- while (!a20_test_long())
- ;
- return 0;
-#elif defined(CONFIG_X86_VOYAGER)
+#ifdef CONFIG_X86_VOYAGER
/* On Voyager, a20_test() is unsafe? */
enable_a20_kbc();
return 0;
#else
int loops = A20_ENABLE_LOOPS;
- while (loops--) {
- /* First, check to see if A20 is already enabled
- (legacy free, etc.) */
- if (a20_test_short())
- return 0;
-
- /* Next, try the BIOS (INT 0x15, AX=0x2401) */
- enable_a20_bios();
- if (a20_test_short())
- return 0;
-
- /* Try enabling A20 through the keyboard controller */
- empty_8042();
- if (a20_test_short())
- return 0; /* BIOS worked, but with delayed reaction */
-
- enable_a20_kbc();
- if (a20_test_long())
- return 0;
-
- /* Finally, try enabling the "fast A20 gate" */
- enable_a20_fast();
- if (a20_test_long())
- return 0;
- }
-
- return -1;
+ int kbc_err;
+
+ while (loops--) {
+ /* First, check to see if A20 is already enabled
+ (legacy free, etc.) */
+ if (a20_test_short())
+ return 0;
+
+ /* Next, try the BIOS (INT 0x15, AX=0x2401) */
+ enable_a20_bios();
+ if (a20_test_short())
+ return 0;
+
+ /* Try enabling A20 through the keyboard controller */
+ kbc_err = empty_8042();
+
+ if (a20_test_short())
+ return 0; /* BIOS worked, but with delayed reaction */
+
+ if (!kbc_err) {
+ enable_a20_kbc();
+ if (a20_test_long())
+ return 0;
+ }
+
+ /* Finally, try enabling the "fast A20 gate" */
+ enable_a20_fast();
+ if (a20_test_long())
+ return 0;
+ }
+
+ return -1;
#endif
}
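
Taken together, the a20.c hunks make empty_8042() detect a missing keyboard controller: thirty-two consecutive 0xff status reads are assumed to mean no KBC is present, and enable_a20() then skips the KBC-based method instead of spinning on dead I/O ports (the removed CONFIG_X86_ELAN special case avoided the KBC for the same kind of reason). A sketch of the full function as it reads after the patch; the tail of the loop, which drains the output buffer and exits when both buffers are empty, is unchanged context reconstructed here on that assumption:

static int empty_8042(void)
{
	u8 status;
	int loops = MAX_8042_LOOPS;
	int ffs   = MAX_8042_FF;

	while (loops--) {
		io_delay();

		status = inb(0x64);
		if (status == 0xff) {
			/* FF is a plausible, but very unlikely status */
			if (!--ffs)
				return -1; /* Assume no KBC present */
		}
		if (status & 1) {
			/* Read and discard input data */
			io_delay();
			(void)inb(0x60);
		} else if (!(status & 2)) {
			/* Buffers empty, finished! */
			return 0;
		}
	}

	return -1;
}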
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index 40226999cbf..05f2da7f387 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -57,7 +57,6 @@ typedef struct { pgdval_t pgd; } pgd_t;
typedef struct { pgprotval_t pgprot; } pgprot_t;
extern int page_is_ram(unsigned long pagenr);
-extern int pagerange_is_ram(unsigned long start, unsigned long end);
extern int devmem_is_allowed(unsigned long pagenr);
extern void map_devmem(unsigned long pfn, unsigned long size,
pgprot_t vma_prot);
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 1c244b64573..b788dfd2048 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -1431,14 +1431,7 @@ static inline void arch_leave_lazy_cpu_mode(void)
PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
}
-static inline void arch_flush_lazy_cpu_mode(void)
-{
- if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
- arch_leave_lazy_cpu_mode();
- arch_enter_lazy_cpu_mode();
- }
-}
-
+void arch_flush_lazy_cpu_mode(void);
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
@@ -1451,13 +1444,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}
-static inline void arch_flush_lazy_mmu_mode(void)
-{
- if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
- arch_leave_lazy_mmu_mode();
- arch_enter_lazy_mmu_mode();
- }
-}
+void arch_flush_lazy_mmu_mode(void);
static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
unsigned long phys, pgprot_t flags)
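
Here the flush helpers stop being header inlines: only the prototypes remain, and the bodies move out of line into arch/x86/kernel/paravirt.c (see that hunk below), where they gain preempt_disable()/preempt_enable() around the per-CPU lazy-mode check. A hedged sketch of the call pattern this serves, modeled on the pageattr.c hunk further down; the wrapper function is illustrative, not kernel code:

/* Assumes <asm/paravirt.h> and <asm/pgtable.h> context. */
static int example_read_pte_safely(unsigned long address)
{
	unsigned int level;
	pte_t *kpte;

	/* Push any batched lazy-MMU updates so the in-memory
	   page tables are current before we look at them. */
	arch_flush_lazy_mmu_mode();

	kpte = lookup_address(address, &level);
	return kpte ? 0 : -EINVAL;
}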
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 388254f69a2..a00545fe5cd 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -269,6 +269,8 @@ static void hpet_set_mode(enum clock_event_mode mode,
now = hpet_readl(HPET_COUNTER);
cmp = now + (unsigned long) delta;
cfg = hpet_readl(HPET_Tn_CFG(timer));
+ /* Make sure we use edge triggered interrupts */
+ cfg &= ~HPET_TN_LEVEL;
cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
HPET_TN_SETVAL | HPET_TN_32BIT;
hpet_writel(cfg, HPET_Tn_CFG(timer));
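
The hpet.c fix is a one-bit read-modify-write: the periodic-setup path previously inherited whatever trigger mode firmware left in the timer's config register, so a stale HPET_TN_LEVEL bit would silently produce level-triggered interrupts. Clearing it before ORing in the wanted flags forces edge triggering. A hedged sketch of the same clear-before-set discipline applied to a one-shot setup (illustrative; the file's actual CLOCK_EVT_MODE_ONESHOT branch may differ):

cfg = hpet_readl(HPET_Tn_CFG(timer));
cfg &= ~(HPET_TN_LEVEL | HPET_TN_PERIODIC);	/* edge-triggered, no auto-reload */
cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
hpet_writel(cfg, HPET_Tn_CFG(timer));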
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index cea11c8e304..6dc4dca255e 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -286,6 +286,32 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
return __get_cpu_var(paravirt_lazy_mode);
}
+void arch_flush_lazy_mmu_mode(void)
+{
+ preempt_disable();
+
+ if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+ WARN_ON(preempt_count() == 1);
+ arch_leave_lazy_mmu_mode();
+ arch_enter_lazy_mmu_mode();
+ }
+
+ preempt_enable();
+}
+
+void arch_flush_lazy_cpu_mode(void)
+{
+ preempt_disable();
+
+ if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
+ WARN_ON(preempt_count() == 1);
+ arch_leave_lazy_cpu_mode();
+ arch_enter_lazy_cpu_mode();
+ }
+
+ preempt_enable();
+}
+
struct pv_info pv_info = {
.name = "bare hardware",
.paravirt_enabled = 0,
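
The new out-of-line helpers also pick up a debugging check. Inside the function, preempt_count() already includes the preempt_disable() just executed, so a value of exactly 1 means the caller contributed nothing: it was fully preemptible while a lazy mode was active, which is a bug because the lazy-mode state is per-CPU. In outline:

/*
 * Illustration of the WARN_ON arithmetic (not kernel code):
 *
 *   caller's preempt_count()    seen inside the flush helper
 *   ------------------------    ----------------------------
 *          0                ->   1   WARN: preemptible while in
 *                                    a lazy mode
 *        >=1                ->  >=2  OK: caller held a spinlock or
 *                                    disabled preemption itself
 */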
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 7ec39ab37a2..d2f7cd5b2c8 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -805,12 +805,16 @@ static void ptrace_bts_untrace(struct task_struct *child)
static void ptrace_bts_detach(struct task_struct *child)
{
- if (unlikely(child->bts)) {
- ds_release_bts(child->bts);
- child->bts = NULL;
-
- ptrace_bts_free_buffer(child);
- }
+ /*
+ * Ptrace_detach() races with ptrace_untrace() in case
+ * the child dies and is reaped by another thread.
+ *
+ * We only do the memory accounting at this point and
+ * leave the buffer deallocation and the bts tracer
+ * release to ptrace_bts_untrace() which will be called
+ * later on with tasklist_lock held.
+ */
+ release_locked_buffer(child->bts_buffer, child->bts_size);
}
#else
static inline void ptrace_bts_fork(struct task_struct *tsk) {}
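
ptrace_bts_detach() now does only the locked-memory accounting; buffer deallocation and tracer release are deferred to ptrace_bts_untrace(), which runs later under tasklist_lock and therefore cannot race with a concurrent reap of the child. A hedged sketch of what release_locked_buffer() amounts to (the real helper lives in mm/mlock.c; treat this body as an assumption about its semantics, not a copy):

/* Drop the locked-page charge taken when the BTS buffer was
 * allocated, without freeing the buffer itself. */
static void release_locked_buffer_sketch(void *buffer, size_t size)
{
	unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;

	down_write(&current->mm->mmap_sem);
	current->mm->locked_vm -= pgsz;
	up_write(&current->mm->mmap_sem);
}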
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 1448bcb7f22..433f7bd4648 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -134,25 +134,6 @@ int page_is_ram(unsigned long pagenr)
return 0;
}
-int pagerange_is_ram(unsigned long start, unsigned long end)
-{
- int ram_page = 0, not_rampage = 0;
- unsigned long page_nr;
-
- for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
- ++page_nr) {
- if (page_is_ram(page_nr))
- ram_page = 1;
- else
- not_rampage = 1;
-
- if (ram_page == not_rampage)
- return -1;
- }
-
- return ram_page;
-}
-
/*
* Fix up the linear direct mapping of the kernel to avoid cache attribute
* conflicts.
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 84ba74820ad..8ca0d8566fc 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -575,7 +575,6 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
address = cpa->vaddr[cpa->curpage];
else
address = *cpa->vaddr;
-
repeat:
kpte = lookup_address(address, &level);
if (!kpte)
@@ -812,6 +811,13 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
vm_unmap_aliases();
+ /*
+ * If we're called with lazy mmu updates enabled, the
+ * in-memory pte state may be stale. Flush pending updates to
+ * bring them up to date.
+ */
+ arch_flush_lazy_mmu_mode();
+
cpa.vaddr = addr;
cpa.numpages = numpages;
cpa.mask_set = mask_set;
@@ -854,6 +860,13 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
} else
cpa_flush_all(cache);
+ /*
+ * If we've been called with lazy mmu updates enabled, then
+ * make sure that everything gets flushed out before we
+ * return.
+ */
+ arch_flush_lazy_mmu_mode();
+
out:
return ret;
}
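
With both calls in place, change_page_attr_set_clr() is bracketed by flushes: pending batched updates are pushed out before the attribute walk reads any PTEs, and the walk's own changes are pushed out before control returns to the caller. Schematically (statement sequence only; the elided middle is the existing attribute-change loop):

vm_unmap_aliases();
arch_flush_lazy_mmu_mode();	/* batched updates -> memory, before PTEs are read */

/* ... __change_page_attr_set_clr() walk, then TLB/cache flushing ... */

arch_flush_lazy_mmu_mode();	/* our own updates -> flushed, before returning */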
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 9127e31c726..05f9aef6818 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -220,6 +220,33 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
static struct memtype *cached_entry;
static u64 cached_start;
+static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
+{
+ int ram_page = 0, not_rampage = 0;
+ unsigned long page_nr;
+
+ for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
+ ++page_nr) {
+ /*
+ * For legacy reasons, the physical address range in the legacy ISA
+ * region is tracked as non-RAM. This allows users of /dev/mem to
+ * map portions of the legacy ISA region, even when some of those
+ * portions are listed (or not even listed) with different e820
+ * types (RAM/reserved/...).
+ */
+ if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
+ page_is_ram(page_nr))
+ ram_page = 1;
+ else
+ not_rampage = 1;
+
+ if (ram_page == not_rampage)
+ return -1;
+ }
+
+ return ram_page;
+}
+
/*
* For RAM pages, mark the pages as non WB memory type using
* PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
@@ -345,20 +372,12 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
if (new_type)
*new_type = actual_type;
- /*
- * For legacy reasons, some parts of the physical address range in the
- * legacy 1MB region is treated as non-RAM (even when listed as RAM in
- * the e820 tables). So we will track the memory attributes of this
- * legacy 1MB region using the linear memtype_list always.
- */
- if (end >= ISA_END_ADDRESS) {
- is_range_ram = pagerange_is_ram(start, end);
- if (is_range_ram == 1)
- return reserve_ram_pages_type(start, end, req_type,
- new_type);
- else if (is_range_ram < 0)
- return -EINVAL;
- }
+ is_range_ram = pat_pagerange_is_ram(start, end);
+ if (is_range_ram == 1)
+ return reserve_ram_pages_type(start, end, req_type,
+ new_type);
+ else if (is_range_ram < 0)
+ return -EINVAL;
new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
if (!new)
@@ -455,19 +474,11 @@ int free_memtype(u64 start, u64 end)
if (is_ISA_range(start, end - 1))
return 0;
- /*
- * For legacy reasons, some parts of the physical address range in the
- * legacy 1MB region is treated as non-RAM (even when listed as RAM in
- * the e820 tables). So we will track the memory attributes of this
- * legacy 1MB region using the linear memtype_list always.
- */
- if (end >= ISA_END_ADDRESS) {
- is_range_ram = pagerange_is_ram(start, end);
- if (is_range_ram == 1)
- return free_ram_pages_type(start, end);
- else if (is_range_ram < 0)
- return -EINVAL;
- }
+ is_range_ram = pat_pagerange_is_ram(start, end);
+ if (is_range_ram == 1)
+ return free_ram_pages_type(start, end);
+ else if (is_range_ram < 0)
+ return -EINVAL;
spin_lock(&memtype_lock);
list_for_each_entry(entry, &memtype_list, nd) {
@@ -635,17 +646,13 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
unsigned long flags;
unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
- is_ram = pagerange_is_ram(paddr, paddr + size);
+ is_ram = pat_pagerange_is_ram(paddr, paddr + size);
- if (is_ram != 0) {
- /*
- * For mapping RAM pages, drivers need to call
- * set_memory_[uc|wc|wb] directly, for reserve and free, before
- * setting up the PTE.
- */
- WARN_ON_ONCE(1);
- return 0;
- }
+ /*
+ * reserve_pfn_range() doesn't support RAM pages.
+ */
+ if (is_ram != 0)
+ return -EINVAL;
ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
if (ret)
@@ -702,7 +709,7 @@ static void free_pfn_range(u64 paddr, unsigned long size)
{
int is_ram;
- is_ram = pagerange_is_ram(paddr, paddr + size);
+ is_ram = pat_pagerange_is_ram(paddr, paddr + size);
if (is_ram == 0)
free_memtype(paddr, paddr + size);
}
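
The relocated helper keeps the old tri-state contract of the pagerange_is_ram() deleted from ioremap.c above (1 = all RAM, 0 = no RAM, -1 = mixed) but folds the legacy-ISA special case into the per-page test, so callers no longer need their own end >= ISA_END_ADDRESS guard. A worked example, using the real values ISA_END_ADDRESS = 0x100000 and PAGE_SHIFT = 12 (pages 0x0..0xff are thus always classified as non-RAM) and assuming a machine whose e820 map marks everything above 1MB as RAM:

pat_pagerange_is_ram(0x0,      0xa0000);	/* 0: entirely inside the ISA region */
pat_pagerange_is_ram(0x100000, 0x200000);	/* 1: all RAM per e820 */
pat_pagerange_is_ram(0xff000,  0x101000);	/* -1: mixed, spans ISA and RAM pages */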