Diffstat (limited to 'arch/x86_64')
-rw-r--r--  arch/x86_64/Kconfig                  |  6
-rw-r--r--  arch/x86_64/ia32/ia32entry.S         |  1
-rw-r--r--  arch/x86_64/kernel/acpi/Makefile     |  1
-rw-r--r--  arch/x86_64/kernel/acpi/processor.c  | 72
-rw-r--r--  arch/x86_64/kernel/acpi/sleep.c      |  7
-rw-r--r--  arch/x86_64/kernel/apic.c            |  2
-rw-r--r--  arch/x86_64/kernel/i387.c            |  2
-rw-r--r--  arch/x86_64/kernel/setup.c           | 95
-rw-r--r--  arch/x86_64/mm/srat.c                | 33
-rw-r--r--  arch/x86_64/pci/mmconfig.c           | 13
10 files changed, 117 insertions(+), 115 deletions(-)
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 408d44a5975..af44130f0d6 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -299,6 +299,7 @@ config X86_64_ACPI_NUMA
bool "ACPI NUMA detection"
depends on NUMA
select ACPI
+ select PCI
select ACPI_NUMA
default y
help
@@ -389,6 +390,7 @@ config GART_IOMMU
bool "K8 GART IOMMU support"
default y
select SWIOTLB
+ select AGP
depends on PCI
help
Support for hardware IOMMU in AMD's Opteron/Athlon64 Processors
@@ -401,11 +403,9 @@ config GART_IOMMU
northbridge and a software emulation used on other systems without
hardware IOMMU. If unsure, say Y.
-# need this always enabled with GART_IOMMU for the VIA workaround
+# need this always selected by GART_IOMMU for the VIA workaround
config SWIOTLB
bool
- default y
- depends on GART_IOMMU
config X86_MCE
bool "Machine check support" if EMBEDDED
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index 5a92fed2d1d..4ec594ab1a9 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -696,4 +696,5 @@ ia32_sys_call_table:
.quad sys_sync_file_range
.quad sys_tee
.quad compat_sys_vmsplice
+ .quad compat_sys_move_pages
ia32_syscall_end:
diff --git a/arch/x86_64/kernel/acpi/Makefile b/arch/x86_64/kernel/acpi/Makefile
index 4fe97071f29..080b9963f1b 100644
--- a/arch/x86_64/kernel/acpi/Makefile
+++ b/arch/x86_64/kernel/acpi/Makefile
@@ -4,5 +4,6 @@ obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o
ifneq ($(CONFIG_ACPI_PROCESSOR),)
obj-y += processor.o
+processor-y := ../../../i386/kernel/acpi/processor.o ../../../i386/kernel/acpi/cstate.o
endif
diff --git a/arch/x86_64/kernel/acpi/processor.c b/arch/x86_64/kernel/acpi/processor.c
deleted file mode 100644
index 3bdc2baa5bb..00000000000
--- a/arch/x86_64/kernel/acpi/processor.c
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * arch/x86_64/kernel/acpi/processor.c
- *
- * Copyright (C) 2005 Intel Corporation
- * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
- * - Added _PDC for platforms with Intel CPUs
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/acpi.h>
-
-#include <acpi/processor.h>
-#include <asm/acpi.h>
-
-static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
-{
- struct acpi_object_list *obj_list;
- union acpi_object *obj;
- u32 *buf;
-
- /* allocate and initialize pdc. It will be used later. */
- obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
- if (!obj_list) {
- printk(KERN_ERR "Memory allocation error\n");
- return;
- }
-
- obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
- if (!obj) {
- printk(KERN_ERR "Memory allocation error\n");
- kfree(obj_list);
- return;
- }
-
- buf = kmalloc(12, GFP_KERNEL);
- if (!buf) {
- printk(KERN_ERR "Memory allocation error\n");
- kfree(obj);
- kfree(obj_list);
- return;
- }
-
- buf[0] = ACPI_PDC_REVISION_ID;
- buf[1] = 1;
- buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
-
- obj->type = ACPI_TYPE_BUFFER;
- obj->buffer.length = 12;
- obj->buffer.pointer = (u8 *) buf;
- obj_list->count = 1;
- obj_list->pointer = obj;
- pr->pdc = obj_list;
-
- return;
-}
-
-/* Initialize _PDC data based on the CPU vendor */
-void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
-{
- unsigned int cpu = pr->id;
- struct cpuinfo_x86 *c = cpu_data + cpu;
-
- pr->pdc = NULL;
- if (c->x86_vendor == X86_VENDOR_INTEL && cpu_has(c, X86_FEATURE_EST))
- init_intel_pdc(pr, c);
-
- return;
-}
-
-EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
diff --git a/arch/x86_64/kernel/acpi/sleep.c b/arch/x86_64/kernel/acpi/sleep.c
index 867a0ebee17..091bc79c888 100644
--- a/arch/x86_64/kernel/acpi/sleep.c
+++ b/arch/x86_64/kernel/acpi/sleep.c
@@ -35,6 +35,8 @@
#include <linux/pci.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
+#include <linux/cpumask.h>
+
#include <asm/mpspec.h>
#include <asm/io.h>
#include <asm/apic.h>
@@ -66,7 +68,8 @@ static void init_low_mapping(void)
pgd_t *slot0 = pgd_offset(current->mm, 0UL);
low_ptr = *slot0;
set_pgd(slot0, *pgd_offset(current->mm, PAGE_OFFSET));
- flush_tlb_all();
+ WARN_ON(num_online_cpus() != 1);
+ local_flush_tlb();
}
/**
@@ -92,7 +95,7 @@ int acpi_save_state_mem(void)
void acpi_restore_state_mem(void)
{
set_pgd(pgd_offset(current->mm, 0UL), low_ptr);
- flush_tlb_all();
+ local_flush_tlb();
}
/**
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index 100a30c4004..29ef99001e0 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -51,7 +51,7 @@ int disable_apic_timer __initdata;
static cpumask_t timer_interrupt_broadcast_ipi_mask;
/* Using APIC to generate smp_local_timer_interrupt? */
-int using_apic_timer = 0;
+int using_apic_timer __read_mostly = 0;
static void apic_pm_activate(void);
diff --git a/arch/x86_64/kernel/i387.c b/arch/x86_64/kernel/i387.c
index a5d7e16b928..44ddb1ec808 100644
--- a/arch/x86_64/kernel/i387.c
+++ b/arch/x86_64/kernel/i387.c
@@ -24,7 +24,7 @@
#include <asm/ptrace.h>
#include <asm/uaccess.h>
-unsigned int mxcsr_feature_mask = 0xffffffff;
+unsigned int mxcsr_feature_mask __read_mostly = 0xffffffff;
void mxcsr_feature_mask_init(void)
{
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 655b9192eeb..b8d5116d737 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -47,6 +47,7 @@
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
+#include <linux/suspend.h>
#include <asm/mtrr.h>
#include <asm/uaccess.h>
@@ -595,6 +596,100 @@ static void discover_ebda(void)
ebda_size = 64*1024;
}
+#ifdef CONFIG_SOFTWARE_SUSPEND
+static void __init mark_nosave_page_range(unsigned long start, unsigned long end)
+{
+ struct page *page;
+ while (start <= end) {
+ page = pfn_to_page(start);
+ SetPageNosave(page);
+ start++;
+ }
+}
+
+static void __init e820_nosave_reserved_pages(void)
+{
+ int i;
+ unsigned long r_start = 0, r_end = 0;
+
+ /* Assume e820 map is sorted */
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ unsigned long start, end;
+
+ start = round_down(ei->addr, PAGE_SIZE);
+ end = round_up(ei->addr + ei->size, PAGE_SIZE);
+ if (start >= end)
+ continue;
+ if (ei->type == E820_RESERVED)
+ continue;
+ r_end = start>>PAGE_SHIFT;
+ /* swsusp ignores invalid pfns, so skip such pages here */
+ if (r_end > end_pfn)
+ r_end = end_pfn;
+ if (r_end > r_start)
+ mark_nosave_page_range(r_start, r_end-1);
+ if (r_end >= end_pfn)
+ break;
+ r_start = end>>PAGE_SHIFT;
+ }
+}
+
+static void __init e820_save_acpi_pages(void)
+{
+ int i;
+
+ /* Assume e820 map is sorted */
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ unsigned long start, end;
+
+ start = ei->addr;
+ end = ei->addr + ei->size;
+ if (start >= end)
+ continue;
+ if (ei->type != E820_ACPI && ei->type != E820_NVS)
+ continue;
+ /*
+ * If the region is below end_pfn, it will be
+ * saved/restored by swsusp as 'RAM' type.
+ */
+ if (start < (end_pfn << PAGE_SHIFT))
+ start = end_pfn << PAGE_SHIFT;
+ if (end > start)
+ swsusp_add_arch_pages(start, end);
+ }
+}
+
+extern char __start_rodata, __end_rodata;
+/*
+ * BIOS reserved region/hole - no save/restore
+ * ACPI NVS - save/restore
+ * ACPI Data - this is a little tricky: the OS may reuse this memory after it
+ * has read the tables from the region, but saving/restoring it has no side
+ * effect, and Linux runtime module load/unload might use it.
+ * kernel rodata - no save/restore (kernel rodata isn't changed)
+ */
+static int __init mark_nosave_pages(void)
+{
+ unsigned long pfn_start, pfn_end;
+
+ /* BIOS reserved regions & holes */
+ e820_nosave_reserved_pages();
+
+ /* kernel rodata */
+ pfn_start = round_up(__pa_symbol(&__start_rodata), PAGE_SIZE) >> PAGE_SHIFT;
+ pfn_end = round_down(__pa_symbol(&__end_rodata), PAGE_SIZE) >> PAGE_SHIFT;
+ mark_nosave_page_range(pfn_start, pfn_end-1);
+
+ /* record ACPI Data/NVS as saveable */
+ e820_save_acpi_pages();
+
+ return 0;
+}
+core_initcall(mark_nosave_pages);
+#endif
+
void __init setup_arch(char **cmdline_p)
{
unsigned long kernel_end;
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 474df22c6ed..502fce65e96 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -30,7 +30,6 @@
static struct acpi_table_slit *acpi_slit;
static nodemask_t nodes_parsed __initdata;
-static nodemask_t nodes_found __initdata;
static struct bootnode nodes[MAX_NUMNODES] __initdata;
static struct bootnode nodes_add[MAX_NUMNODES] __initdata;
static int found_add_area __initdata;
@@ -38,33 +37,14 @@ int hotadd_percent __initdata = 0;
#ifndef RESERVE_HOTADD
#define hotadd_percent 0 /* Ignore all settings */
#endif
-static u8 pxm2node[256] = { [0 ... 255] = 0xff };
/* Too small nodes confuse the VM badly. Usually they result
from BIOS bugs. */
#define NODE_MIN_SIZE (4*1024*1024)
-static int node_to_pxm(int n);
-
-int pxm_to_node(int pxm)
-{
- if ((unsigned)pxm >= 256)
- return -1;
- /* Extend 0xff to (int)-1 */
- return (signed char)pxm2node[pxm];
-}
-
static __init int setup_node(int pxm)
{
- unsigned node = pxm2node[pxm];
- if (node == 0xff) {
- if (nodes_weight(nodes_found) >= MAX_NUMNODES)
- return -1;
- node = first_unset_node(nodes_found);
- node_set(node, nodes_found);
- pxm2node[pxm] = node;
- }
- return pxm2node[pxm];
+ return acpi_map_pxm_to_node(pxm);
}
static __init int conflicting_nodes(unsigned long start, unsigned long end)
@@ -440,17 +420,6 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
return 0;
}
-static int node_to_pxm(int n)
-{
- int i;
- if (pxm2node[n] == n)
- return n;
- for (i = 0; i < 256; i++)
- if (pxm2node[i] == n)
- return i;
- return 0;
-}
-
void __init srat_reserve_add_area(int nodeid)
{
if (found_add_area && nodes_add[nodeid].end) {
diff --git a/arch/x86_64/pci/mmconfig.c b/arch/x86_64/pci/mmconfig.c
index a2060e4d5de..3c55c76c6fd 100644
--- a/arch/x86_64/pci/mmconfig.c
+++ b/arch/x86_64/pci/mmconfig.c
@@ -13,7 +13,10 @@
#include "pci.h"
-#define MMCONFIG_APER_SIZE (256*1024*1024)
+/* aperture is up to 256MB but BIOS may reserve less */
+#define MMCONFIG_APER_MIN (2 * 1024*1024)
+#define MMCONFIG_APER_MAX (256 * 1024*1024)
+
/* Verify the first 16 busses. We assume that systems with more busses
get MCFG right. */
#define MAX_CHECK_BUS 16
@@ -175,9 +178,10 @@ void __init pci_mmcfg_init(void)
return;
if (!e820_all_mapped(pci_mmcfg_config[0].base_address,
- pci_mmcfg_config[0].base_address + MMCONFIG_APER_SIZE,
+ pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN,
E820_RESERVED)) {
- printk(KERN_ERR "PCI: BIOS Bug: MCFG area is not E820-reserved\n");
+ printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n",
+ pci_mmcfg_config[0].base_address);
printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
return;
}
@@ -190,7 +194,8 @@ void __init pci_mmcfg_init(void)
}
for (i = 0; i < pci_mmcfg_config_num; ++i) {
pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i];
- pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].base_address, MMCONFIG_APER_SIZE);
+ pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].base_address,
+ MMCONFIG_APER_MAX);
if (!pci_mmcfg_virt[i].virt) {
printk("PCI: Cannot map mmconfig aperture for segment %d\n",
pci_mmcfg_config[i].pci_segment_group_number);