author     Linus Torvalds <torvalds@g5.osdl.org>  2005-11-14 19:56:02 -0800
committer  Linus Torvalds <torvalds@g5.osdl.org>  2005-11-14 19:56:02 -0800
commit     4060994c3e337b40e0f6fa8ce2cc178e021baf3d (patch)
tree       980297c1747ca89354bc879cc5d17903eacb19e2 /arch/x86_64
parent     0174f72f848dfe7dc7488799776303c81b181b16 (diff)
parent     d3ee871e63d0a0c70413dc0aa5534b8d6cd6ec37 (diff)
Merge x86-64 update from Andi
Diffstat (limited to 'arch/x86_64')
-rw-r--r--  arch/x86_64/Kconfig               |  57
-rw-r--r--  arch/x86_64/Kconfig.debug         |   9
-rw-r--r--  arch/x86_64/defconfig             |  98
-rw-r--r--  arch/x86_64/ia32/ia32_aout.c      |   3
-rw-r--r--  arch/x86_64/ia32/ia32_binfmt.c    |   4
-rw-r--r--  arch/x86_64/kernel/Makefile       |   1
-rw-r--r--  arch/x86_64/kernel/aperture.c     |   2
-rw-r--r--  arch/x86_64/kernel/apic.c         |  10
-rw-r--r--  arch/x86_64/kernel/e820.c         |   3
-rw-r--r--  arch/x86_64/kernel/entry.S        |   3
-rw-r--r--  arch/x86_64/kernel/head.S         |  37
-rw-r--r--  arch/x86_64/kernel/head64.c       |  14
-rw-r--r--  arch/x86_64/kernel/i8259.c        |   2
-rw-r--r--  arch/x86_64/kernel/io_apic.c      |  80
-rw-r--r--  arch/x86_64/kernel/mce.c          |  17
-rw-r--r--  arch/x86_64/kernel/mce_amd.c      | 538
-rw-r--r--  arch/x86_64/kernel/mpparse.c      |  23
-rw-r--r--  arch/x86_64/kernel/pci-gart.c     |   8
-rw-r--r--  arch/x86_64/kernel/process.c      |  47
-rw-r--r--  arch/x86_64/kernel/reboot.c       |   7
-rw-r--r--  arch/x86_64/kernel/setup.c        |  89
-rw-r--r--  arch/x86_64/kernel/setup64.c      |   2
-rw-r--r--  arch/x86_64/kernel/signal.c       |  17
-rw-r--r--  arch/x86_64/kernel/smp.c          |   7
-rw-r--r--  arch/x86_64/kernel/smpboot.c      | 111
-rw-r--r--  arch/x86_64/kernel/sys_x86_64.c   |  14
-rw-r--r--  arch/x86_64/kernel/traps.c        |  44
-rw-r--r--  arch/x86_64/kernel/vmlinux.lds.S  |   2
-rw-r--r--  arch/x86_64/kernel/x8664_ksyms.c  |   3
-rw-r--r--  arch/x86_64/lib/clear_page.S      |  38
-rw-r--r--  arch/x86_64/lib/copy_page.S       |  87
-rw-r--r--  arch/x86_64/lib/memcpy.S          |  93
-rw-r--r--  arch/x86_64/lib/memset.S          |  94
-rw-r--r--  arch/x86_64/mm/fault.c            |  19
-rw-r--r--  arch/x86_64/mm/init.c             | 129
-rw-r--r--  arch/x86_64/mm/k8topology.c       |   1
-rw-r--r--  arch/x86_64/mm/numa.c             | 122
-rw-r--r--  arch/x86_64/mm/srat.c             |   6
38 files changed, 1178 insertions, 663 deletions
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 4cce2f6f170..6ece645e4db 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -226,22 +226,42 @@ config SCHED_SMT
source "kernel/Kconfig.preempt"
-config K8_NUMA
- bool "K8 NUMA support"
- select NUMA
+config NUMA
+ bool "Non Uniform Memory Access (NUMA) Support"
depends on SMP
help
- Enable NUMA (Non Unified Memory Architecture) support for
- AMD Opteron Multiprocessor systems. The kernel will try to allocate
- memory used by a CPU on the local memory controller of the CPU
- and add some more NUMA awareness to the kernel.
- This code is recommended on all multiprocessor Opteron systems
- and normally doesn't hurt on others.
+ Enable NUMA (Non Uniform Memory Access) support. The kernel
+ will try to allocate memory used by a CPU on the local memory
+ controller of the CPU and add some more NUMA awareness to the kernel.
+ This code is recommended on all multiprocessor Opteron systems.
+ If the system is EM64T, you should say N unless your system is EM64T
+ NUMA.
+
+config K8_NUMA
+ bool "Old style AMD Opteron NUMA detection"
+ depends on NUMA
+ default y
+ help
+ Enable K8 NUMA node topology detection. You should say Y here if
+ you have a multi processor AMD K8 system. This uses an old
+ method to read the NUMA configuration directly from the builtin
+ Northbridge of Opteron. It is recommended to use X86_64_ACPI_NUMA
+ instead, which also takes priority if both are compiled in.
+
+# Dummy CONFIG option to select ACPI_NUMA from drivers/acpi/Kconfig.
+
+config X86_64_ACPI_NUMA
+ bool "ACPI NUMA detection"
+ depends on NUMA
+ select ACPI
+ select ACPI_NUMA
+ default y
+ help
+ Enable ACPI SRAT based node topology detection.
config NUMA_EMU
- bool "NUMA emulation support"
- select NUMA
- depends on SMP
+ bool "NUMA emulation"
+ depends on NUMA
help
Enable NUMA emulation. A flat machine will be split
into virtual nodes when booted with "numa=fake=N", where N is the
@@ -252,9 +272,6 @@ config ARCH_DISCONTIGMEM_ENABLE
depends on NUMA
default y
-config NUMA
- bool
- default n
config ARCH_DISCONTIGMEM_ENABLE
def_bool y
@@ -374,6 +391,14 @@ config X86_MCE_INTEL
Additional support for intel specific MCE features such as
the thermal monitor.
+config X86_MCE_AMD
+ bool "AMD MCE features"
+ depends on X86_MCE && X86_LOCAL_APIC
+ default y
+ help
+ Additional support for AMD specific MCE features such as
+ the DRAM Error Threshold.
+
config PHYSICAL_START
hex "Physical address where the kernel is loaded" if EMBEDDED
default "0x100000"
@@ -502,7 +527,7 @@ config IA32_EMULATION
left.
config IA32_AOUT
- bool "IA32 a.out support"
+ tristate "IA32 a.out support"
depends on IA32_EMULATION
help
Support old a.out binaries in the 32bit emulation.
diff --git a/arch/x86_64/Kconfig.debug b/arch/x86_64/Kconfig.debug
index d584ecc27ea..e2c6e64a85e 100644
--- a/arch/x86_64/Kconfig.debug
+++ b/arch/x86_64/Kconfig.debug
@@ -2,15 +2,6 @@ menu "Kernel hacking"
source "lib/Kconfig.debug"
-# !SMP for now because the context switch early causes GPF in segment reloading
-# and the GS base checking does the wrong thing then, causing a hang.
-config CHECKING
- bool "Additional run-time checks"
- depends on DEBUG_KERNEL && !SMP
- help
- Enables some internal consistency checks for kernel debugging.
- You should normally say N.
-
config INIT_DEBUG
bool "Debug __init statements"
depends on DEBUG_KERNEL
diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig
index f8db7e500fb..5d56542fb68 100644
--- a/arch/x86_64/defconfig
+++ b/arch/x86_64/defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.13-git11
-# Mon Sep 12 16:16:16 2005
+# Linux kernel version: 2.6.14-git7
+# Sat Nov 5 15:55:50 2005
#
CONFIG_X86_64=y
CONFIG_64BIT=y
@@ -35,7 +35,7 @@ CONFIG_POSIX_MQUEUE=y
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
# CONFIG_AUDIT is not set
-# CONFIG_HOTPLUG is not set
+CONFIG_HOTPLUG=y
CONFIG_KOBJECT_UEVENT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
@@ -93,10 +93,11 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
CONFIG_PREEMPT_BKL=y
+CONFIG_NUMA=y
CONFIG_K8_NUMA=y
+CONFIG_X86_64_ACPI_NUMA=y
# CONFIG_NUMA_EMU is not set
CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
-CONFIG_NUMA=y
CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_SELECT_MEMORY_MODEL=y
@@ -107,9 +108,10 @@ CONFIG_DISCONTIGMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_NEED_MULTIPLE_NODES=y
# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
-CONFIG_HAVE_DEC_LOCK=y
CONFIG_NR_CPUS=32
+CONFIG_HOTPLUG_CPU=y
CONFIG_HPET_TIMER=y
CONFIG_X86_PM_TIMER=y
CONFIG_HPET_EMULATE_RTC=y
@@ -117,6 +119,7 @@ CONFIG_GART_IOMMU=y
CONFIG_SWIOTLB=y
CONFIG_X86_MCE=y
CONFIG_X86_MCE_INTEL=y
+CONFIG_X86_MCE_AMD=y
CONFIG_PHYSICAL_START=0x100000
# CONFIG_KEXEC is not set
CONFIG_SECCOMP=y
@@ -136,11 +139,15 @@ CONFIG_PM=y
# CONFIG_PM_DEBUG is not set
CONFIG_SOFTWARE_SUSPEND=y
CONFIG_PM_STD_PARTITION=""
+CONFIG_SUSPEND_SMP=y
#
# ACPI (Advanced Configuration and Power Interface) Support
#
CONFIG_ACPI=y
+CONFIG_ACPI_SLEEP=y
+CONFIG_ACPI_SLEEP_PROC_FS=y
+CONFIG_ACPI_SLEEP_PROC_SLEEP=y
CONFIG_ACPI_AC=y
CONFIG_ACPI_BATTERY=y
CONFIG_ACPI_BUTTON=y
@@ -148,6 +155,7 @@ CONFIG_ACPI_BUTTON=y
CONFIG_ACPI_HOTKEY=m
CONFIG_ACPI_FAN=y
CONFIG_ACPI_PROCESSOR=y
+CONFIG_ACPI_HOTPLUG_CPU=y
CONFIG_ACPI_THERMAL=y
CONFIG_ACPI_NUMA=y
# CONFIG_ACPI_ASUS is not set
@@ -158,7 +166,7 @@ CONFIG_ACPI_BLACKLIST_YEAR=2001
CONFIG_ACPI_EC=y
CONFIG_ACPI_POWER=y
CONFIG_ACPI_SYSTEM=y
-# CONFIG_ACPI_CONTAINER is not set
+CONFIG_ACPI_CONTAINER=y
#
# CPU Frequency scaling
@@ -293,7 +301,6 @@ CONFIG_IPV6=y
# Network testing
#
# CONFIG_NET_PKTGEN is not set
-# CONFIG_NETFILTER_NETLINK is not set
# CONFIG_HAMRADIO is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
@@ -312,6 +319,11 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_DEBUG_DRIVER is not set
#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
# Memory Technology Devices (MTD)
#
# CONFIG_MTD is not set
@@ -354,6 +366,11 @@ CONFIG_IOSCHED_NOOP=y
# CONFIG_IOSCHED_AS is not set
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+CONFIG_DEFAULT_DEADLINE=y
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
# CONFIG_ATA_OVER_ETH is not set
#
@@ -450,6 +467,7 @@ CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_SPI_ATTRS=y
# CONFIG_SCSI_FC_ATTRS is not set
# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
#
# SCSI low-level drivers
@@ -469,20 +487,24 @@ CONFIG_AIC79XX_DEBUG_MASK=0
# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
# CONFIG_MEGARAID_NEWGEN is not set
# CONFIG_MEGARAID_LEGACY is not set
+# CONFIG_MEGARAID_SAS is not set
CONFIG_SCSI_SATA=y
# CONFIG_SCSI_SATA_AHCI is not set
# CONFIG_SCSI_SATA_SVW is not set
CONFIG_SCSI_ATA_PIIX=y
# CONFIG_SCSI_SATA_MV is not set
-# CONFIG_SCSI_SATA_NV is not set
-# CONFIG_SCSI_SATA_PROMISE is not set
+CONFIG_SCSI_SATA_NV=y
+# CONFIG_SCSI_PDC_ADMA is not set
# CONFIG_SCSI_SATA_QSTOR is not set
+# CONFIG_SCSI_SATA_PROMISE is not set
# CONFIG_SCSI_SATA_SX4 is not set
# CONFIG_SCSI_SATA_SIL is not set
+# CONFIG_SCSI_SATA_SIL24 is not set
# CONFIG_SCSI_SATA_SIS is not set
# CONFIG_SCSI_SATA_ULI is not set
CONFIG_SCSI_SATA_VIA=y
# CONFIG_SCSI_SATA_VITESSE is not set
+CONFIG_SCSI_SATA_INTEL_COMBINED=y
# CONFIG_SCSI_BUSLOGIC is not set
# CONFIG_SCSI_DMX3191D is not set
# CONFIG_SCSI_EATA is not set
@@ -525,6 +547,7 @@ CONFIG_BLK_DEV_DM=y
CONFIG_FUSION=y
CONFIG_FUSION_SPI=y
# CONFIG_FUSION_FC is not set
+# CONFIG_FUSION_SAS is not set
CONFIG_FUSION_MAX_SGE=128
# CONFIG_FUSION_CTL is not set
@@ -564,6 +587,7 @@ CONFIG_NET_ETHERNET=y
CONFIG_MII=y
# CONFIG_HAPPYMEAL is not set
# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
CONFIG_NET_VENDOR_3COM=y
CONFIG_VORTEX=y
# CONFIG_TYPHOON is not set
@@ -740,7 +764,43 @@ CONFIG_LEGACY_PTY_COUNT=256
#
# Watchdog Cards
#
-# CONFIG_WATCHDOG is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+CONFIG_SOFT_WATCHDOG=y
+# CONFIG_ACQUIRE_WDT is not set
+# CONFIG_ADVANTECH_WDT is not set
+# CONFIG_ALIM1535_WDT is not set
+# CONFIG_ALIM7101_WDT is not set
+# CONFIG_SC520_WDT is not set
+# CONFIG_EUROTECH_WDT is not set
+# CONFIG_IB700_WDT is not set
+# CONFIG_IBMASR is not set
+# CONFIG_WAFER_WDT is not set
+# CONFIG_I6300ESB_WDT is not set
+# CONFIG_I8XX_TCO is not set
+# CONFIG_SC1200_WDT is not set
+# CONFIG_60XX_WDT is not set
+# CONFIG_SBC8360_WDT is not set
+# CONFIG_CPU5_WDT is not set
+# CONFIG_W83627HF_WDT is not set
+# CONFIG_W83877F_WDT is not set
+# CONFIG_W83977F_WDT is not set
+# CONFIG_MACHZ_WDT is not set
+
+#
+# PCI-based Watchdog Cards
+#
+# CONFIG_PCIPCWATCHDOG is not set
+# CONFIG_WDTPCI is not set
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
CONFIG_HW_RANDOM=y
# CONFIG_NVRAM is not set
CONFIG_RTC=y
@@ -767,6 +827,7 @@ CONFIG_MAX_RAW_DEVS=256
# TPM devices
#
# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
#
# I2C support
@@ -783,6 +844,7 @@ CONFIG_MAX_RAW_DEVS=256
#
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_HDAPS is not set
# CONFIG_HWMON_DEBUG_CHIP is not set
#
@@ -886,12 +948,15 @@ CONFIG_USB_UHCI_HCD=y
# USB Device Class drivers
#
# CONFIG_OBSOLETE_OSS_USB_DRIVER is not set
-# CONFIG_USB_BLUETOOTH_TTY is not set
# CONFIG_USB_ACM is not set
CONFIG_USB_PRINTER=y
#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# may also be needed; see USB_STORAGE Help for more information
#
CONFIG_USB_STORAGE=y
# CONFIG_USB_STORAGE_DEBUG is not set
@@ -924,6 +989,7 @@ CONFIG_USB_HIDINPUT=y
# CONFIG_USB_XPAD is not set
# CONFIG_USB_ATI_REMOTE is not set
# CONFIG_USB_KEYSPAN_REMOTE is not set
+# CONFIG_USB_APPLETOUCH is not set
#
# USB Imaging devices
@@ -1005,7 +1071,7 @@ CONFIG_USB_MON=y
#
# CONFIG_EDD is not set
# CONFIG_DELL_RBU is not set
-CONFIG_DCDBAS=m
+# CONFIG_DCDBAS is not set
#
# File systems
@@ -1037,7 +1103,7 @@ CONFIG_INOTIFY=y
# CONFIG_QUOTA is not set
CONFIG_DNOTIFY=y
CONFIG_AUTOFS_FS=y
-# CONFIG_AUTOFS4_FS is not set
+CONFIG_AUTOFS4_FS=y
# CONFIG_FUSE_FS is not set
#
@@ -1068,7 +1134,7 @@ CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
CONFIG_HUGETLB_PAGE=y
CONFIG_RAMFS=y
-# CONFIG_RELAYFS_FS is not set
+CONFIG_RELAYFS_FS=y
#
# Miscellaneous filesystems
@@ -1186,7 +1252,9 @@ CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_DEBUG_KOBJECT is not set
# CONFIG_DEBUG_INFO is not set
CONFIG_DEBUG_FS=y
+# CONFIG_DEBUG_VM is not set
# CONFIG_FRAME_POINTER is not set
+# CONFIG_RCU_TORTURE_TEST is not set
CONFIG_INIT_DEBUG=y
# CONFIG_IOMMU_DEBUG is not set
CONFIG_KPROBES=y
diff --git a/arch/x86_64/ia32/ia32_aout.c b/arch/x86_64/ia32/ia32_aout.c
index 93c60f4aa47..3bf58af9893 100644
--- a/arch/x86_64/ia32/ia32_aout.c
+++ b/arch/x86_64/ia32/ia32_aout.c
@@ -36,9 +36,6 @@
#undef WARN_OLD
#undef CORE_DUMP /* probably broken */
-extern int ia32_setup_arg_pages(struct linux_binprm *bprm,
- unsigned long stack_top, int exec_stack);
-
static int load_aout_binary(struct linux_binprm *, struct pt_regs * regs);
static int load_aout_library(struct file*);
diff --git a/arch/x86_64/ia32/ia32_binfmt.c b/arch/x86_64/ia32/ia32_binfmt.c
index d9161e39597..830feb272ec 100644
--- a/arch/x86_64/ia32/ia32_binfmt.c
+++ b/arch/x86_64/ia32/ia32_binfmt.c
@@ -335,7 +335,8 @@ static void elf32_init(struct pt_regs *regs)
me->thread.es = __USER_DS;
}
-int setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top, int executable_stack)
+int ia32_setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top,
+ int executable_stack)
{
unsigned long stack_base;
struct vm_area_struct *mpnt;
@@ -389,6 +390,7 @@ int setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top, int exec
return 0;
}
+EXPORT_SYMBOL(ia32_setup_arg_pages);
static unsigned long
elf32_map (struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int prot, int type)
diff --git a/arch/x86_64/kernel/Makefile b/arch/x86_64/kernel/Makefile
index 14328cab5d3..fe4cbd1c4b2 100644
--- a/arch/x86_64/kernel/Makefile
+++ b/arch/x86_64/kernel/Makefile
@@ -11,6 +11,7 @@ obj-y := process.o signal.o entry.o traps.o irq.o \
obj-$(CONFIG_X86_MCE) += mce.o
obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o
+obj-$(CONFIG_X86_MCE_AMD) += mce_amd.o
obj-$(CONFIG_MTRR) += ../../i386/kernel/cpu/mtrr/
obj-$(CONFIG_ACPI) += acpi/
obj-$(CONFIG_X86_MSR) += msr.o
diff --git a/arch/x86_64/kernel/aperture.c b/arch/x86_64/kernel/aperture.c
index 962ad4823b6..c7f4fdd20f0 100644
--- a/arch/x86_64/kernel/aperture.c
+++ b/arch/x86_64/kernel/aperture.c
@@ -196,7 +196,7 @@ static __u32 __init search_agp_bridge(u32 *order, int *valid_agp)
void __init iommu_hole_init(void)
{
int fix, num;
- u32 aper_size, aper_alloc = 0, aper_order, last_aper_order = 0;
+ u32 aper_size, aper_alloc = 0, aper_order = 0, last_aper_order = 0;
u64 aper_base, last_aper_base = 0;
int valid_agp = 0;
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index b6e7715d877..18691ce4c75 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -833,6 +833,16 @@ int setup_profiling_timer(unsigned int multiplier)
return 0;
}
+#ifdef CONFIG_X86_MCE_AMD
+void setup_threshold_lvt(unsigned long lvt_off)
+{
+ unsigned int v = 0;
+ unsigned long reg = (lvt_off << 4) + 0x500;
+ v |= THRESHOLD_APIC_VECTOR;
+ apic_write(reg, v);
+}
+#endif /* CONFIG_X86_MCE_AMD */
+
#undef APIC_DIVISOR
/*
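As a quick orientation, here is a minimal user-space sketch (not part of the patch) of the address arithmetic in setup_threshold_lvt() above: the threshold LVT entry sits in the local APIC's 0x500 register block, indexed by the 4-bit offset the CPU reports in MC4_MISC.

#include <stdio.h>

int main(void)
{
	unsigned long lvt_off = 0;                  /* offset read from MCi_MISC */
	unsigned long reg = (lvt_off << 4) + 0x500; /* same math as setup_threshold_lvt */
	printf("threshold LVT APIC register: 0x%lx\n", reg);
	return 0;
}
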
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index ab3f87aaff7..17579a1a174 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -23,8 +23,7 @@
#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/bootsetup.h>
-
-extern char _end[];
+#include <asm/sections.h>
/*
* PFN of last memory page.
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index 7937971d185..9ff42041bb6 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -612,6 +612,9 @@ retint_kernel:
ENTRY(thermal_interrupt)
apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
+ENTRY(threshold_interrupt)
+ apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
+
#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S
index b92e5f45ed4..15290968e49 100644
--- a/arch/x86_64/kernel/head.S
+++ b/arch/x86_64/kernel/head.S
@@ -12,6 +12,7 @@
#include <linux/linkage.h>
#include <linux/threads.h>
+#include <linux/init.h>
#include <asm/desc.h>
#include <asm/segment.h>
#include <asm/page.h>
@@ -70,7 +71,7 @@ startup_32:
movl %eax, %cr4
/* Setup early boot stage 4 level pagetables */
- movl $(init_level4_pgt - __START_KERNEL_map), %eax
+ movl $(boot_level4_pgt - __START_KERNEL_map), %eax
movl %eax, %cr3
/* Setup EFER (Extended Feature Enable Register) */
@@ -113,7 +114,7 @@ startup_64:
movq %rax, %cr4
/* Setup early boot stage 4 level pagetables. */
- movq $(init_level4_pgt - __START_KERNEL_map), %rax
+ movq $(boot_level4_pgt - __START_KERNEL_map), %rax
movq %rax, %cr3
/* Check if nx is implemented */
@@ -240,20 +241,10 @@ ljumpvector:
ENTRY(stext)
ENTRY(_stext)
- /*
- * This default setting generates an ident mapping at address 0x100000
- * and a mapping for the kernel that precisely maps virtual address
- * 0xffffffff80000000 to physical address 0x000000. (always using
- * 2Mbyte large pages provided by PAE mode)
- */
.org 0x1000
ENTRY(init_level4_pgt)
- .quad 0x0000000000002007 + __PHYSICAL_START /* -> level3_ident_pgt */
- .fill 255,8,0
- .quad 0x000000000000a007 + __PHYSICAL_START
- .fill 254,8,0
- /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
- .quad 0x0000000000003007 + __PHYSICAL_START /* -> level3_kernel_pgt */
+ /* This gets initialized in x86_64_start_kernel */
+ .fill 512,8,0
.org 0x2000
ENTRY(level3_ident_pgt)
@@ -350,6 +341,24 @@ ENTRY(wakeup_level4_pgt)
.quad 0x0000000000003007 + __PHYSICAL_START /* -> level3_kernel_pgt */
#endif
+#ifndef CONFIG_HOTPLUG_CPU
+ __INITDATA
+#endif
+ /*
+ * This default setting generates an ident mapping at address 0x100000
+ * and a mapping for the kernel that precisely maps virtual address
+ * 0xffffffff80000000 to physical address 0x000000. (always using
+ * 2Mbyte large pages provided by PAE mode)
+ */
+ .align PAGE_SIZE
+ENTRY(boot_level4_pgt)
+ .quad 0x0000000000002007 + __PHYSICAL_START /* -> level3_ident_pgt */
+ .fill 255,8,0
+ .quad 0x000000000000a007 + __PHYSICAL_START
+ .fill 254,8,0
+ /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
+ .quad 0x0000000000003007 + __PHYSICAL_START /* -> level3_kernel_pgt */
+
.data
.align 16
diff --git a/arch/x86_64/kernel/head64.c b/arch/x86_64/kernel/head64.c
index cf6ab147a2a..b675c5add01 100644
--- a/arch/x86_64/kernel/head64.c
+++ b/arch/x86_64/kernel/head64.c
@@ -19,14 +19,15 @@
#include <asm/bootsetup.h>
#include <asm/setup.h>
#include <asm/desc.h>
+#include <asm/pgtable.h>
+#include <asm/sections.h>
/* Don't add a printk in there. printk relies on the PDA which is not initialized
yet. */
static void __init clear_bss(void)
{
- extern char __bss_start[], __bss_end[];
memset(__bss_start, 0,
- (unsigned long) __bss_end - (unsigned long) __bss_start);
+ (unsigned long) __bss_stop - (unsigned long) __bss_start);
}
#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
@@ -75,8 +76,6 @@ static void __init setup_boot_cpu_data(void)
boot_cpu_data.x86_mask = eax & 0xf;
}
-extern char _end[];
-
void __init x86_64_start_kernel(char * real_mode_data)
{
char *s;
@@ -86,6 +85,13 @@ void __init x86_64_start_kernel(char * real_mode_data)
set_intr_gate(i, early_idt_handler);
asm volatile("lidt %0" :: "m" (idt_descr));
clear_bss();
+
+ /*
+ * switch to init_level4_pgt from boot_level4_pgt
+ */
+ memcpy(init_level4_pgt, boot_level4_pgt, PTRS_PER_PGD*sizeof(pgd_t));
+ asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
+
pda_init(0);
copy_bootdata(real_mode_data);
#ifdef CONFIG_SMP
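The head64.c change above follows a simple hand-over pattern: boot on a throwaway table, copy it into the permanent one, then repoint the hardware at the copy. A hedged user-space analogy, with a plain pointer standing in for %cr3 and an invented entry value:

#include <stdio.h>
#include <string.h>

#define PTRS_PER_PGD 512
typedef unsigned long pgd_t;

static pgd_t boot_level4_pgt[PTRS_PER_PGD] = { 0x2007 }; /* assumed entry */
static pgd_t init_level4_pgt[PTRS_PER_PGD];
static pgd_t *cr3;                    /* stand-in for the real %cr3 register */

int main(void)
{
	/* copy the boot table into the permanent one, then switch */
	memcpy(init_level4_pgt, boot_level4_pgt, PTRS_PER_PGD * sizeof(pgd_t));
	cr3 = init_level4_pgt;        /* the real code does movq %0,%%cr3 */
	printf("cr3 -> init_level4_pgt, entry0=0x%lx\n", cr3[0]);
	return 0;
}
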
diff --git a/arch/x86_64/kernel/i8259.c b/arch/x86_64/kernel/i8259.c
index a9368d4c4ab..6e5101ad3d1 100644
--- a/arch/x86_64/kernel/i8259.c
+++ b/arch/x86_64/kernel/i8259.c
@@ -492,6 +492,7 @@ void invalidate_interrupt5(void);
void invalidate_interrupt6(void);
void invalidate_interrupt7(void);
void thermal_interrupt(void);
+void threshold_interrupt(void);
void i8254_timer_resume(void);
static void setup_timer_hardware(void)
@@ -580,6 +581,7 @@ void __init init_IRQ(void)
set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
#endif
set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
+ set_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
#ifdef CONFIG_X86_LOCAL_APIC
/* self generated IPI for local APIC timer */
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index c8eee20cd51..97154ab058b 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -57,7 +57,7 @@ int nr_ioapic_registers[MAX_IO_APICS];
* Rough estimation of how many shared IRQs there are, can
* be changed anytime.
*/
-#define MAX_PLUS_SHARED_IRQS NR_IRQS
+#define MAX_PLUS_SHARED_IRQS NR_IRQ_VECTORS
#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
/*
@@ -85,6 +85,7 @@ int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
int pin; \
struct irq_pin_list *entry = irq_2_pin + irq; \
\
+ BUG_ON(irq >= NR_IRQS); \
for (;;) { \
unsigned int reg; \
pin = entry->pin; \
@@ -127,6 +128,8 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
}
#endif
+static u8 gsi_2_irq[NR_IRQ_VECTORS] = { [0 ... NR_IRQ_VECTORS-1] = 0xFF };
+
/*
* The common case is 1:1 IRQ<->pin mappings. Sometimes there are
* shared ISA-space IRQs, so we have to support them. We are super
@@ -137,6 +140,7 @@ static void add_pin_to_irq(unsigned int irq, int apic, int pin)
static int first_free_entry = NR_IRQS;
struct irq_pin_list *entry = irq_2_pin + irq;
+ BUG_ON(irq >= NR_IRQS);
while (entry->next)
entry = irq_2_pin + entry->next;
@@ -144,7 +148,7 @@ static void add_pin_to_irq(unsigned int irq, int apic, int pin)
entry->next = first_free_entry;
entry = irq_2_pin + entry->next;
if (++first_free_entry >= PIN_MAP_SIZE)
- panic("io_apic.c: whoops");
+ panic("io_apic.c: ran out of irq_2_pin entries!");
}
entry->apic = apic;
entry->pin = pin;
@@ -420,6 +424,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
best_guess = irq;
}
}
+ BUG_ON(best_guess >= NR_IRQS);
return best_guess;
}
@@ -610,6 +615,64 @@ static inline int irq_trigger(int idx)
return MPBIOS_trigger(idx);
}
+static int next_irq = 16;
+
+/*
+ * gsi_irq_sharing -- Name overload! "irq" can be either a legacy IRQ
+ * in the range 0-15, a linux IRQ in the range 0-223, or a GSI number
+ * from ACPI, which can reach 800 in large boxen.
+ *
+ * Compact the sparse GSI space into a sequential IRQ series and reuse
+ * vectors if possible.
+ */
+int gsi_irq_sharing(int gsi)
+{
+ int i, tries, vector;
+
+ BUG_ON(gsi >= NR_IRQ_VECTORS);
+
+ if (platform_legacy_irq(gsi))
+ return gsi;
+
+ if (gsi_2_irq[gsi] != 0xFF)
+ return (int)gsi_2_irq[gsi];
+
+ tries = NR_IRQS;
+ try_again:
+ vector = assign_irq_vector(gsi);
+
+ /*
+ * Sharing vectors means sharing IRQs, so scan irq_vectors for previous
+ * use of vector and if found, return that IRQ. However, we never want
+ * to share legacy IRQs, which usually have a different trigger mode
+ * than PCI.
+ */
+ for (i = 0; i < NR_IRQS; i++)
+ if (IO_APIC_VECTOR(i) == vector)
+ break;
+ if (platform_legacy_irq(i)) {
+ if (--tries >= 0) {
+ IO_APIC_VECTOR(i) = 0;
+ goto try_again;
+ }
+ panic("gsi_irq_sharing: didn't find an IRQ using vector 0x%02X for GSI %d", vector, gsi);
+ }
+ if (i < NR_IRQS) {
+ gsi_2_irq[gsi] = i;
+ printk(KERN_INFO "GSI %d sharing vector 0x%02X and IRQ %d\n",
+ gsi, vector, i);
+ return i;
+ }
+
+ i = next_irq++;
+ BUG_ON(i >= NR_IRQS);
+ gsi_2_irq[gsi] = i;
+ IO_APIC_VECTOR(i) = vector;
+ printk(KERN_INFO "GSI %d assigned vector 0x%02X and IRQ %d\n",
+ gsi, vector, i);
+ return i;
+}
+
static int pin_2_irq(int idx, int apic, int pin)
{
int irq, i;
@@ -639,6 +702,7 @@ static int pin_2_irq(int idx, int apic, int pin)
while (i < apic)
irq += nr_ioapic_registers[i++];
irq += pin;
+ irq = gsi_irq_sharing(irq);
break;
}
default:
@@ -648,6 +712,7 @@ static int pin_2_irq(int idx, int apic, int pin)
break;
}
}
+ BUG_ON(irq >= NR_IRQS);
/*
* PCI IRQ command line redirection. Yes, limits are hardcoded.
@@ -663,6 +728,7 @@ static int pin_2_irq(int idx, int apic, int pin)
}
}
}
+ BUG_ON(irq >= NR_IRQS);
return irq;
}
@@ -690,8 +756,8 @@ int assign_irq_vector(int irq)
{
static int current_vector = FIRST_DEVICE_VECTOR, offset = 0;
- BUG_ON(irq >= NR_IRQ_VECTORS);
- if (IO_APIC_VECTOR(irq) > 0)
+ BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
+ if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0)
return IO_APIC_VECTOR(irq);
next:
current_vector += 8;
@@ -699,9 +765,8 @@ next:
goto next;
if (current_vector >= FIRST_SYSTEM_VECTOR) {
- offset++;
- if (!(offset%8))
- return -ENOSPC;
+ /* If we run out of vectors on large boxen, must share them. */
+ offset = (offset + 1) % 8;
current_vector = FIRST_DEVICE_VECTOR + offset;
}
@@ -1917,6 +1982,7 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
entry.polarity = active_high_low;
entry.mask = 1; /* Disabled (masked) */
+ irq = gsi_irq_sharing(irq);
/*
* IRQs < 16 are already in the irq_2_pin[] map
*/
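To see why the assign_irq_vector() change stops returning -ENOSPC, here is a small stand-alone simulation of the allocation walk; the vector constants are assumed from kernels of this era, not taken from this patch:

#include <stdio.h>

#define FIRST_DEVICE_VECTOR 0x31   /* assumed value */
#define FIRST_SYSTEM_VECTOR 0xef   /* assumed value */
#define IA32_SYSCALL_VECTOR 0x80   /* assumed value; skipped by the walk */

int main(void)
{
	int current_vector = FIRST_DEVICE_VECTOR, offset = 0;
	for (int irq = 0; irq < 32; irq++) {
next:
		current_vector += 8;
		if (current_vector == IA32_SYSCALL_VECTOR)
			goto next;
		if (current_vector >= FIRST_SYSTEM_VECTOR) {
			/* out of vectors: wrap to the next offset and share */
			offset = (offset + 1) % 8;
			current_vector = FIRST_DEVICE_VECTOR + offset;
		}
		printf("irq %2d -> vector 0x%02x\n", irq, current_vector);
	}
	return 0;
}
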
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index 69541db5ff2..183dc610542 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -37,7 +37,7 @@ static unsigned long bank[NR_BANKS] = { [0 ... NR_BANKS-1] = ~0UL };
static unsigned long console_logged;
static int notify_user;
static int rip_msr;
-static int mce_bootlog;
+static int mce_bootlog = 1;
/*
* Lockless MCE logging infrastructure.
@@ -347,7 +347,11 @@ static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
/* disable GART TBL walk error reporting, which trips off
incorrectly with the IOMMU & 3ware & Cerberus. */
clear_bit(10, &bank[4]);
+ /* Lots of broken BIOS around that don't clear them
+ by default and leave crap in there. Don't log. */
+ mce_bootlog = 0;
}
+
}
static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
@@ -356,6 +360,9 @@ static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
case X86_VENDOR_INTEL:
mce_intel_feature_init(c);
break;
+ case X86_VENDOR_AMD:
+ mce_amd_feature_init(c);
+ break;
default:
break;
}
@@ -495,16 +502,16 @@ static int __init mcheck_disable(char *str)
/* mce=off disables machine check. Note you can reenable it later
using sysfs.
mce=TOLERANCELEVEL (number, see above)
- mce=bootlog Log MCEs from before booting. Disabled by default to work
- around buggy BIOS that leave bogus MCEs. */
+ mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
+ mce=nobootlog Don't log MCEs from before booting. */
static int __init mcheck_enable(char *str)
{
if (*str == '=')
str++;
if (!strcmp(str, "off"))
mce_dont_init = 1;
- else if (!strcmp(str, "bootlog"))
- mce_bootlog = 1;
+ else if (!strcmp(str, "bootlog") || !strcmp(str,"nobootlog"))
+ mce_bootlog = str[0] == 'b';
else if (isdigit(str[0]))
get_option(&str, &tolerant);
else
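The combined test in mcheck_enable() distinguishes "bootlog" from "nobootlog" by their first character alone. A stand-alone rendering of that parsing idiom (illustrative, not kernel code):

#include <stdio.h>
#include <string.h>

static int parse_bootlog(const char *str)
{
	if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
		return str[0] == 'b';  /* "bootlog" -> 1, "nobootlog" -> 0 */
	return -1;                     /* unrecognized option */
}

int main(void)
{
	printf("bootlog=%d nobootlog=%d\n",
	       parse_bootlog("bootlog"), parse_bootlog("nobootlog"));
	return 0;
}
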
diff --git a/arch/x86_64/kernel/mce_amd.c b/arch/x86_64/kernel/mce_amd.c
new file mode 100644
index 00000000000..1f76175ace0
--- /dev/null
+++ b/arch/x86_64/kernel/mce_amd.c
@@ -0,0 +1,538 @@
+/*
+ * (c) 2005 Advanced Micro Devices, Inc.
+ * Your use of this code is subject to the terms and conditions of the
+ * GNU general public license version 2. See "COPYING" or
+ * http://www.gnu.org/licenses/gpl.html
+ *
+ * Written by Jacob Shin - AMD, Inc.
+ *
+ * Support : jacob.shin@amd.com
+ *
+ * MC4_MISC0 DRAM ECC Error Threshold available under AMD K8 Rev F.
+ * MC4_MISC0 exists per physical processor.
+ *
+ */
+
+#include <linux/cpu.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kobject.h>
+#include <linux/notifier.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/sysdev.h>
+#include <linux/sysfs.h>
+#include <asm/apic.h>
+#include <asm/mce.h>
+#include <asm/msr.h>
+#include <asm/percpu.h>
+
+#define PFX "mce_threshold: "
+#define VERSION "version 1.00.9"
+#define NR_BANKS 5
+#define THRESHOLD_MAX 0xFFF
+#define INT_TYPE_APIC 0x00020000
+#define MASK_VALID_HI 0x80000000
+#define MASK_LVTOFF_HI 0x00F00000
+#define MASK_COUNT_EN_HI 0x00080000
+#define MASK_INT_TYPE_HI 0x00060000
+#define MASK_OVERFLOW_HI 0x00010000
+#define MASK_ERR_COUNT_HI 0x00000FFF
+#define MASK_OVERFLOW 0x0001000000000000L
+
+struct threshold_bank {
+ unsigned int cpu;
+ u8 bank;
+ u8 interrupt_enable;
+ u16 threshold_limit;
+ struct kobject kobj;
+};
+
+static struct threshold_bank threshold_defaults = {
+ .interrupt_enable = 0,
+ .threshold_limit = THRESHOLD_MAX,
+};
+
+#ifdef CONFIG_SMP
+static unsigned char shared_bank[NR_BANKS] = {
+ 0, 0, 0, 0, 1
+};
+#endif
+
+static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */
+
+/*
+ * CPU Initialization
+ */
+
+/* must be called with correct cpu affinity */
+static void threshold_restart_bank(struct threshold_bank *b,
+ int reset, u16 old_limit)
+{
+ u32 mci_misc_hi, mci_misc_lo;
+
+ rdmsr(MSR_IA32_MC0_MISC + b->bank * 4, mci_misc_lo, mci_misc_hi);
+
+ if (b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX))
+ reset = 1; /* limit cannot be lower than err count */
+
+ if (reset) { /* reset err count and overflow bit */
+ mci_misc_hi =
+ (mci_misc_hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
+ (THRESHOLD_MAX - b->threshold_limit);
+ } else if (old_limit) { /* change limit w/o reset */
+ int new_count = (mci_misc_hi & THRESHOLD_MAX) +
+ (old_limit - b->threshold_limit);
+ mci_misc_hi = (mci_misc_hi & ~MASK_ERR_COUNT_HI) |
+ (new_count & THRESHOLD_MAX);
+ }
+
+ b->interrupt_enable ?
+ (mci_misc_hi = (mci_misc_hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) :
+ (mci_misc_hi &= ~MASK_INT_TYPE_HI);
+
+ mci_misc_hi |= MASK_COUNT_EN_HI;
+ wrmsr(MSR_IA32_MC0_MISC + b->bank * 4, mci_misc_lo, mci_misc_hi);
+}
+
+void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
+{
+ int bank;
+ u32 mci_misc_lo, mci_misc_hi;
+ unsigned int cpu = smp_processor_id();
+
+ for (bank = 0; bank < NR_BANKS; ++bank) {
+ rdmsr(MSR_IA32_MC0_MISC + bank * 4, mci_misc_lo, mci_misc_hi);
+
+ /* !valid, !counter present, bios locked */
+ if (!(mci_misc_hi & MASK_VALID_HI) ||
+ !(mci_misc_hi & MASK_VALID_HI >> 1) ||
+ (mci_misc_hi & MASK_VALID_HI >> 2))
+ continue;
+
+ per_cpu(bank_map, cpu) |= (1 << bank);
+
+#ifdef CONFIG_SMP
+ if (shared_bank[bank] && cpu_core_id[cpu])
+ continue;
+#endif
+
+ setup_threshold_lvt((mci_misc_hi & MASK_LVTOFF_HI) >> 20);
+ threshold_defaults.cpu = cpu;
+ threshold_defaults.bank = bank;
+ threshold_restart_bank(&threshold_defaults, 0, 0);
+ }
+}
+
+/*
+ * APIC Interrupt Handler
+ */
+
+/*
+ * threshold interrupt handler will service THRESHOLD_APIC_VECTOR.
+ * the interrupt goes off when error_count reaches threshold_limit.
+ * the handler will simply log mcelog w/ software defined bank number.
+ */
+asmlinkage void mce_threshold_interrupt(void)
+{
+ int bank;
+ struct mce m;
+
+ ack_APIC_irq();
+ irq_enter();
+
+ memset(&m, 0, sizeof(m));
+ rdtscll(m.tsc);
+ m.cpu = smp_processor_id();
+
+ /* assume first bank caused it */
+ for (bank = 0; bank < NR_BANKS; ++bank) {
+ m.bank = MCE_THRESHOLD_BASE + bank;
+ rdmsrl(MSR_IA32_MC0_MISC + bank * 4, m.misc);
+
+ if (m.misc & MASK_OVERFLOW) {
+ mce_log(&m);
+ goto out;
+ }
+ }
+ out:
+ irq_exit();
+}
+
+/*
+ * Sysfs Interface
+ */
+
+static struct sysdev_class threshold_sysclass = {
+ set_kset_name("threshold"),
+};
+
+static DEFINE_PER_CPU(struct sys_device, device_threshold);
+
+struct threshold_attr {
+ struct attribute attr;
+ ssize_t(*show) (struct threshold_bank *, char *);
+ ssize_t(*store) (struct threshold_bank *, const char *, size_t count);
+};
+
+static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
+
+static cpumask_t affinity_set(unsigned int cpu)
+{
+ cpumask_t oldmask = current->cpus_allowed;
+ cpumask_t newmask = CPU_MASK_NONE;
+ cpu_set(cpu, newmask);
+ set_cpus_allowed(current, newmask);
+ return oldmask;
+}
+
+static void affinity_restore(cpumask_t oldmask)
+{
+ set_cpus_allowed(current, oldmask);
+}
+
+#define SHOW_FIELDS(name) \
+ static ssize_t show_ ## name(struct threshold_bank * b, char *buf) \
+ { \
+ return sprintf(buf, "%lx\n", (unsigned long) b->name); \
+ }
+SHOW_FIELDS(interrupt_enable)
+SHOW_FIELDS(threshold_limit)
+
+static ssize_t store_interrupt_enable(struct threshold_bank *b,
+ const char *buf, size_t count)
+{
+ char *end;
+ cpumask_t oldmask;
+ unsigned long new = simple_strtoul(buf, &end, 0);
+ if (end == buf)
+ return -EINVAL;
+ b->interrupt_enable = !!new;
+
+ oldmask = affinity_set(b->cpu);
+ threshold_restart_bank(b, 0, 0);
+ affinity_restore(oldmask);
+
+ return end - buf;
+}
+
+static ssize_t store_threshold_limit(struct threshold_bank *b,
+ const char *buf, size_t count)
+{
+ char *end;
+ cpumask_t oldmask;
+ u16 old;
+ unsigned long new = simple_strtoul(buf, &end, 0);
+ if (end == buf)
+ return -EINVAL;
+ if (new > THRESHOLD_MAX)
+ new = THRESHOLD_MAX;
+ if (new < 1)
+ new = 1;
+ old = b->threshold_limit;
+ b->threshold_limit = new;
+
+ oldmask = affinity_set(b->cpu);
+ threshold_restart_bank(b, 0, old);
+ affinity_restore(oldmask);
+
+ return end - buf;
+}
+
+static ssize_t show_error_count(struct threshold_bank *b, char *buf)
+{
+ u32 high, low;
+ cpumask_t oldmask;
+ oldmask = affinity_set(b->cpu);
+ rdmsr(MSR_IA32_MC0_MISC + b->bank * 4, low, high); /* ignore low 32 */
+ affinity_restore(oldmask);
+ return sprintf(buf, "%x\n",
+ (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit));
+}
+
+static ssize_t store_error_count(struct threshold_bank *b,
+ const char *buf, size_t count)
+{
+ cpumask_t oldmask;
+ oldmask = affinity_set(b->cpu);
+ threshold_restart_bank(b, 1, 0);
+ affinity_restore(oldmask);
+ return 1;
+}
+
+#define THRESHOLD_ATTR(_name,_mode,_show,_store) { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
+};
+
+#define ATTR_FIELDS(name) \
+ static struct threshold_attr name = \
+ THRESHOLD_ATTR(name, 0644, show_## name, store_## name)
+
+ATTR_FIELDS(interrupt_enable);
+ATTR_FIELDS(threshold_limit);
+ATTR_FIELDS(error_count);
+
+static struct attribute *default_attrs[] = {
+ &interrupt_enable.attr,
+ &threshold_limit.attr,
+ &error_count.attr,
+ NULL
+};
+
+#define to_bank(k) container_of(k,struct threshold_bank,kobj)
+#define to_attr(a) container_of(a,struct threshold_attr,attr)
+
+static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ struct threshold_bank *b = to_bank(kobj);
+ struct threshold_attr *a = to_attr(attr);
+ ssize_t ret;
+ ret = a->show ? a->show(b, buf) : -EIO;
+ return ret;
+}
+
+static ssize_t store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct threshold_bank *b = to_bank(kobj);
+ struct threshold_attr *a = to_attr(attr);
+ ssize_t ret;
+ ret = a->store ? a->store(b, buf, count) : -EIO;
+ return ret;
+}
+
+static struct sysfs_ops threshold_ops = {
+ .show = show,
+ .store = store,
+};
+
+static struct kobj_type threshold_ktype = {
+ .sysfs_ops = &threshold_ops,
+ .default_attrs = default_attrs,
+};
+
+/* symlinks sibling shared banks to first core. first core owns dir/files. */
+static __cpuinit int threshold_create_bank(unsigned int cpu, int bank)
+{
+ int err = 0;
+ struct threshold_bank *b = 0;
+
+#ifdef CONFIG_SMP
+ if (cpu_core_id[cpu] && shared_bank[bank]) { /* symlink */
+ char name[16];
+ unsigned lcpu = first_cpu(cpu_core_map[cpu]);
+ if (cpu_core_id[lcpu])
+ goto out; /* first core not up yet */
+
+ b = per_cpu(threshold_banks, lcpu)[bank];
+ if (!b)
+ goto out;
+ sprintf(name, "bank%i", bank);
+ err = sysfs_create_link(&per_cpu(device_threshold, cpu).kobj,
+ &b->kobj, name);
+ if (err)
+ goto out;
+ per_cpu(threshold_banks, cpu)[bank] = b;
+ goto out;
+ }
+#endif
+
+ b = kmalloc(sizeof(struct threshold_bank), GFP_KERNEL);
+ if (!b) {
+ err = -ENOMEM;
+ goto out;
+ }
+ memset(b, 0, sizeof(struct threshold_bank));
+
+ b->cpu = cpu;
+ b->bank = bank;
+ b->interrupt_enable = 0;
+ b->threshold_limit = THRESHOLD_MAX;
+ kobject_set_name(&b->kobj, "bank%i", bank);
+ b->kobj.parent = &per_cpu(device_threshold, cpu).kobj;
+ b->kobj.ktype = &threshold_ktype;
+
+ err = kobject_register(&b->kobj);
+ if (err) {
+ kfree(b);
+ goto out;
+ }
+ per_cpu(threshold_banks, cpu)[bank] = b;
+ out:
+ return err;
+}
+
+/* create dir/files for all valid threshold banks */
+static __cpuinit int threshold_create_device(unsigned int cpu)
+{
+ int bank;
+ int err = 0;
+
+ per_cpu(device_threshold, cpu).id = cpu;
+ per_cpu(device_threshold, cpu).cls = &threshold_sysclass;
+ err = sysdev_register(&per_cpu(device_threshold, cpu));
+ if (err)
+ goto out;
+
+ for (bank = 0; bank < NR_BANKS; ++bank) {
+ if (!(per_cpu(bank_map, cpu) & 1 << bank))
+ continue;
+ err = threshold_create_bank(cpu, bank);
+ if (err)
+ goto out;
+ }
+ out:
+ return err;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * let's be hotplug friendly.
+ * in case of multiple core processors, the first core always takes ownership
+ * of shared sysfs dir/files, and rest of the cores will be symlinked to it.
+ */
+
+/* cpu hotplug call removes all symlinks before first core dies */
+static __cpuinit void threshold_remove_bank(unsigned int cpu, int bank)
+{
+ struct threshold_bank *b;
+ char name[16];
+
+ b = per_cpu(threshold_banks, cpu)[bank];
+ if (!b)
+ return;
+ if (shared_bank[bank] && atomic_read(&b->kobj.kref.refcount) > 2) {
+ sprintf(name, "bank%i", bank);
+ sysfs_remove_link(&per_cpu(device_threshold, cpu).kobj, name);
+ per_cpu(threshold_banks, cpu)[bank] = 0;
+ } else {
+ kobject_unregister(&b->kobj);
+ kfree(per_cpu(threshold_banks, cpu)[bank]);
+ }
+}
+
+static __cpuinit void threshold_remove_device(unsigned int cpu)
+{
+ int bank;
+
+ for (bank = 0; bank < NR_BANKS; ++bank) {
+ if (!(per_cpu(bank_map, cpu) & 1 << bank))
+ continue;
+ threshold_remove_bank(cpu, bank);
+ }
+ sysdev_unregister(&per_cpu(device_threshold, cpu));
+}
+
+/* link all existing siblings when first core comes up */
+static __cpuinit int threshold_create_symlinks(unsigned int cpu)
+{
+ int bank, err = 0;
+ unsigned int lcpu = 0;
+
+ if (cpu_core_id[cpu])
+ return 0;
+ for_each_cpu_mask(lcpu, cpu_core_map[cpu]) {
+ if (lcpu == cpu)
+ continue;
+ for (bank = 0; bank < NR_BANKS; ++bank) {
+ if (!(per_cpu(bank_map, cpu) & 1 << bank))
+ continue;
+ if (!shared_bank[bank])
+ continue;
+ err = threshold_create_bank(lcpu, bank);
+ }
+ }
+ return err;
+}
+
+/* remove all symlinks before first core dies. */
+static __cpuinit void threshold_remove_symlinks(unsigned int cpu)
+{
+ int bank;
+ unsigned int lcpu = 0;
+ if (cpu_core_id[cpu])
+ return;
+ for_each_cpu_mask(lcpu, cpu_core_map[cpu]) {
+ if (lcpu == cpu)
+ continue;
+ for (bank = 0; bank < NR_BANKS; ++bank) {
+ if (!(per_cpu(bank_map, cpu) & 1 << bank))
+ continue;
+ if (!shared_bank[bank])
+ continue;
+ threshold_remove_bank(lcpu, bank);
+ }
+ }
+}
+#else /* !CONFIG_HOTPLUG_CPU */
+static __cpuinit void threshold_create_symlinks(unsigned int cpu)
+{
+}
+static __cpuinit void threshold_remove_symlinks(unsigned int cpu)
+{
+}
+static void threshold_remove_device(unsigned int cpu)
+{
+}
+#endif
+
+/* get notified when a cpu comes on/off */
+static __cpuinit int threshold_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ /* cpu was unsigned int to begin with */
+ unsigned int cpu = (unsigned long)hcpu;
+
+ if (cpu >= NR_CPUS)
+ goto out;
+
+ switch (action) {
+ case CPU_ONLINE:
+ threshold_create_device(cpu);
+ threshold_create_symlinks(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ threshold_remove_symlinks(cpu);
+ break;
+ case CPU_DOWN_FAILED:
+ threshold_create_symlinks(cpu);
+ break;
+ case CPU_DEAD:
+ threshold_remove_device(cpu);
+ break;
+ default:
+ break;
+ }
+ out:
+ return NOTIFY_OK;
+}
+
+static struct notifier_block threshold_cpu_notifier = {
+ .notifier_call = threshold_cpu_callback,
+};
+
+static __init int threshold_init_device(void)
+{
+ int err;
+ int lcpu = 0;
+
+ err = sysdev_class_register(&threshold_sysclass);
+ if (err)
+ goto out;
+
+ /* to hit CPUs online before the notifier is up */
+ for_each_online_cpu(lcpu) {
+ err = threshold_create_device(lcpu);
+ if (err)
+ goto out;
+ }
+ register_cpu_notifier(&threshold_cpu_notifier);
+
+ out:
+ return err;
+}
+
+device_initcall(threshold_init_device);
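The threshold banks use an inverted count encoding: threshold_restart_bank() seeds the MCi_MISC count field with THRESHOLD_MAX - threshold_limit, the hardware counts up, and the APIC interrupt fires on overflow past THRESHOLD_MAX — exactly threshold_limit errors later. A small worked example of the arithmetic show_error_count() relies on:

#include <stdio.h>

#define THRESHOLD_MAX 0xFFF

int main(void)
{
	unsigned limit = 10;                     /* threshold_limit */
	unsigned field = THRESHOLD_MAX - limit;  /* written on reset */

	field += 7;                              /* 7 errors observed so far */
	printf("raw count field: 0x%x\n", field);
	printf("errors seen:     %u\n",          /* what show_error_count() prints */
	       field - (THRESHOLD_MAX - limit)); /* -> 7 */
	printf("interrupt after %u more errors\n",
	       THRESHOLD_MAX - field);           /* -> 3 */
	return 0;
}
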
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index f16d38d09da..1105250bf02 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -42,7 +42,7 @@ int acpi_found_madt;
* Various Linux-internal data structures created from the
* MP-table.
*/
-int apic_version [MAX_APICS];
+unsigned char apic_version [MAX_APICS];
unsigned char mp_bus_id_to_type [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
@@ -65,7 +65,9 @@ unsigned long mp_lapic_addr = 0;
/* Processor that is doing the boot up */
unsigned int boot_cpu_id = -1U;
/* Internal processor count */
-static unsigned int num_processors = 0;
+unsigned int num_processors __initdata = 0;
+
+unsigned disabled_cpus __initdata;
/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE;
@@ -106,11 +108,14 @@ static int __init mpf_checksum(unsigned char *mp, int len)
static void __init MP_processor_info (struct mpc_config_processor *m)
{
- int ver, cpu;
+ int cpu;
+ unsigned char ver;
static int found_bsp=0;
- if (!(m->mpc_cpuflag & CPU_ENABLED))
+ if (!(m->mpc_cpuflag & CPU_ENABLED)) {
+ disabled_cpus++;
return;
+ }
printk(KERN_INFO "Processor #%d %d:%d APIC version %d\n",
m->mpc_apicid,
@@ -129,12 +134,14 @@ static void __init MP_processor_info (struct mpc_config_processor *m)
}
cpu = num_processors++;
-
- if (m->mpc_apicid > MAX_APICS) {
+
+#if MAX_APICS < 255
+ if ((int)m->mpc_apicid > MAX_APICS) {
printk(KERN_ERR "Processor #%d INVALID. (Max ID: %d).\n",
m->mpc_apicid, MAX_APICS);
return;
}
+#endif
ver = m->mpc_apicver;
physid_set(m->mpc_apicid, phys_cpu_present_map);
@@ -218,7 +225,7 @@ static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
m->mpc_irqtype, m->mpc_irqflag & 3,
(m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
- if (++mp_irq_entries == MAX_IRQ_SOURCES)
+ if (++mp_irq_entries >= MAX_IRQ_SOURCES)
panic("Max # of irq sources exceeded!!\n");
}
@@ -549,7 +556,7 @@ void __init get_smp_config (void)
* Read the physical hardware table. Anything here will
* override the defaults.
*/
- if (!smp_read_mpc((void *)(unsigned long)mpf->mpf_physptr)) {
+ if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) {
smp_found_config = 0;
printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c
index 88be97c9698..2e28e855ec3 100644
--- a/arch/x86_64/kernel/pci-gart.c
+++ b/arch/x86_64/kernel/pci-gart.c
@@ -220,6 +220,12 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
uses the normal dma_mask for alloc_coherent. */
dma_mask &= *dev->dma_mask;
+ /* Why <=? Even when the mask is smaller than 4GB it is often larger
+ than 16MB and in this case we have a chance of finding fitting memory
+ in the next higher zone first. If not retry with true GFP_DMA. -AK */
+ if (dma_mask <= 0xffffffff)
+ gfp |= GFP_DMA32;
+
again:
memory = dma_alloc_pages(dev, gfp, get_order(size));
if (memory == NULL)
@@ -245,7 +251,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
}
if (!(gfp & GFP_DMA)) {
- gfp |= GFP_DMA;
+ gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
goto again;
}
return NULL;
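The pci-gart change above builds a retry ladder: prefer the 4GB-addressable zone whenever the device mask fits in 32 bits, and only fall back to the scarce 16MB GFP_DMA zone if that fails. A hedged sketch of the control flow, with invented flag values and a fake allocator:

#include <stdio.h>

#define GFP_DMA    0x01u   /* assumed stand-in */
#define GFP_DMA32  0x02u   /* assumed stand-in */

static void *try_alloc(unsigned gfp)
{
	return (gfp & GFP_DMA32) ? NULL : "mem"; /* pretend DMA32 is exhausted */
}

int main(void)
{
	unsigned long long dma_mask = 0xffffffffULL;
	unsigned gfp = 0;

	if (dma_mask <= 0xffffffff)
		gfp |= GFP_DMA32;                /* first try the 4GB zone */
again:
	if (!try_alloc(gfp)) {
		if (!(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA; /* last resort */
			goto again;
		}
		puts("allocation failed");
		return 1;
	}
	puts("allocated");
	return 0;
}
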
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 59be85d9a4b..5afd63e8cef 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -144,7 +144,8 @@ void cpu_idle_wait(void)
do {
ssleep(1);
for_each_online_cpu(cpu) {
- if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
+ if (cpu_isset(cpu, map) &&
+ !per_cpu(cpu_idle_state, cpu))
cpu_clear(cpu, map);
}
cpus_and(map, map, cpu_online_map);
@@ -275,7 +276,8 @@ void __show_regs(struct pt_regs * regs)
system_utsname.version);
printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
printk_address(regs->rip);
- printk("\nRSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp, regs->eflags);
+ printk("\nRSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp,
+ regs->eflags);
printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
regs->rax, regs->rbx, regs->rcx);
printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
@@ -427,15 +429,14 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
struct pt_regs * childregs;
struct task_struct *me = current;
- childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
-
+ childregs = ((struct pt_regs *)
+ (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
*childregs = *regs;
childregs->rax = 0;
childregs->rsp = rsp;
- if (rsp == ~0UL) {
+ if (rsp == ~0UL)
childregs->rsp = (unsigned long)childregs;
- }
p->thread.rsp = (unsigned long) childregs;
p->thread.rsp0 = (unsigned long) (childregs+1);
@@ -457,7 +458,8 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
p->thread.io_bitmap_max = 0;
return -ENOMEM;
}
- memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr, IO_BITMAP_BYTES);
+ memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
+ IO_BITMAP_BYTES);
}
/*
@@ -494,7 +496,8 @@ out:
* - fold all the options into a flag word and test it with a single test.
* - could test fs/gs bitsliced
*/
-struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+struct task_struct *
+__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
struct thread_struct *prev = &prev_p->thread,
*next = &next_p->thread;
@@ -565,7 +568,8 @@ struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *
prev->userrsp = read_pda(oldrsp);
write_pda(oldrsp, next->userrsp);
write_pda(pcurrent, next_p);
- write_pda(kernelstack, (unsigned long)next_p->thread_info + THREAD_SIZE - PDA_STACKOFFSET);
+ write_pda(kernelstack,
+ (unsigned long)next_p->thread_info + THREAD_SIZE - PDA_STACKOFFSET);
/*
* Now maybe reload the debug registers
@@ -646,7 +650,9 @@ asmlinkage long sys_fork(struct pt_regs *regs)
return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
}
-asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp, void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
+asmlinkage long
+sys_clone(unsigned long clone_flags, unsigned long newsp,
+ void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
if (!newsp)
newsp = regs->rsp;
@@ -682,7 +688,8 @@ unsigned long get_wchan(struct task_struct *p)
return 0;
fp = *(u64 *)(p->thread.rsp);
do {
- if (fp < (unsigned long)stack || fp > (unsigned long)stack+THREAD_SIZE)
+ if (fp < (unsigned long)stack ||
+ fp > (unsigned long)stack+THREAD_SIZE)
return 0;
rip = *(u64 *)(fp+8);
if (!in_sched_functions(rip))
@@ -717,8 +724,8 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
task->thread.gsindex = 0;
task->thread.gs = addr;
if (doit) {
- load_gs_index(0);
- ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
+ load_gs_index(0);
+ ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
}
}
put_cpu();
@@ -735,7 +742,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
set_32bit_tls(task, FS_TLS, addr);
if (doit) {
load_TLS(&task->thread, cpu);
- asm volatile("movl %0,%%fs" :: "r" (FS_TLS_SEL));
+ asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
}
task->thread.fsindex = FS_TLS_SEL;
task->thread.fs = 0;
@@ -745,8 +752,8 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
if (doit) {
/* set the selector to 0 to not confuse
__switch_to */
- asm volatile("movl %0,%%fs" :: "r" (0));
- ret = checking_wrmsrl(MSR_FS_BASE, addr);
+ asm volatile("movl %0,%%fs" :: "r" (0));
+ ret = checking_wrmsrl(MSR_FS_BASE, addr);
}
}
put_cpu();
@@ -755,9 +762,9 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
unsigned long base;
if (task->thread.fsindex == FS_TLS_SEL)
base = read_32bit_tls(task, FS_TLS);
- else if (doit) {
+ else if (doit)
rdmsrl(MSR_FS_BASE, base);
- } else
+ else
base = task->thread.fs;
ret = put_user(base, (unsigned long __user *)addr);
break;
@@ -766,9 +773,9 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
unsigned long base;
if (task->thread.gsindex == GS_TLS_SEL)
base = read_32bit_tls(task, GS_TLS);
- else if (doit) {
+ else if (doit)
rdmsrl(MSR_KERNEL_GS_BASE, base);
- } else
+ else
base = task->thread.gs;
ret = put_user(base, (unsigned long __user *)addr);
break;
diff --git a/arch/x86_64/kernel/reboot.c b/arch/x86_64/kernel/reboot.c
index 47f95687905..75235ed2b31 100644
--- a/arch/x86_64/kernel/reboot.c
+++ b/arch/x86_64/kernel/reboot.c
@@ -77,6 +77,7 @@ static inline void kb_wait(void)
void machine_shutdown(void)
{
+ unsigned long flags;
/* Stop the cpus and apics */
#ifdef CONFIG_SMP
int reboot_cpu_id;
@@ -98,7 +99,7 @@ void machine_shutdown(void)
smp_send_stop();
#endif
- local_irq_disable();
+ local_irq_save(flags);
#ifndef CONFIG_SMP
disable_local_APIC();
@@ -106,7 +107,7 @@ void machine_shutdown(void)
disable_IO_APIC();
- local_irq_enable();
+ local_irq_restore(flags);
}
void machine_emergency_restart(void)
@@ -120,7 +121,7 @@ void machine_emergency_restart(void)
/* Could also try the reset bit in the Hammer NB */
switch (reboot_type) {
case BOOT_KBD:
- for (i=0; i<100; i++) {
+ for (i=0; i<10; i++) {
kb_wait();
udelay(50);
outb(0xfe,0x64); /* pulse reset low */
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index da0bc3e7bdf..750e01dcbdf 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -60,6 +60,7 @@
#include <asm/setup.h>
#include <asm/mach_apic.h>
#include <asm/numa.h>
+#include <asm/sections.h>
/*
* Machine setup..
@@ -103,7 +104,6 @@ struct edid_info edid_info;
struct e820map e820;
extern int root_mountflags;
-extern char _text, _etext, _edata, _end;
char command_line[COMMAND_LINE_SIZE];
@@ -412,7 +412,6 @@ contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
unsigned long bootmap_size, bootmap;
- memory_present(0, start_pfn, end_pfn);
bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
if (bootmap == -1L)
@@ -571,6 +570,8 @@ void __init setup_arch(char **cmdline_p)
init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
+ zap_low_mappings(0);
+
#ifdef CONFIG_ACPI
/*
* Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
@@ -657,8 +658,6 @@ void __init setup_arch(char **cmdline_p)
}
#endif
- sparse_init();
-
paging_init();
check_ioapic();
@@ -793,7 +792,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
#endif
bits = 0;
- while ((1 << bits) < c->x86_num_cores)
+ while ((1 << bits) < c->x86_max_cores)
bits++;
/* Low order bits define the core id (index of core in socket) */
@@ -823,10 +822,10 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
if (!node_online(node))
node = nearby_node(apicid);
}
- cpu_to_node[cpu] = node;
+ numa_set_node(cpu, node);
printk(KERN_INFO "CPU %d(%d) -> Node %d -> Core %d\n",
- cpu, c->x86_num_cores, node, cpu_core_id[cpu]);
+ cpu, c->x86_max_cores, node, cpu_core_id[cpu]);
#endif
#endif
}
@@ -875,9 +874,9 @@ static int __init init_amd(struct cpuinfo_x86 *c)
display_cacheinfo(c);
if (c->extended_cpuid_level >= 0x80000008) {
- c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
- if (c->x86_num_cores & (c->x86_num_cores - 1))
- c->x86_num_cores = 1;
+ c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
+ if (c->x86_max_cores & (c->x86_max_cores - 1))
+ c->x86_max_cores = 1;
amd_detect_cmp(c);
}
@@ -889,54 +888,44 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
u32 eax, ebx, ecx, edx;
- int index_msb, tmp;
+ int index_msb, core_bits;
int cpu = smp_processor_id();
-
+
+ cpuid(1, &eax, &ebx, &ecx, &edx);
+
+ c->apicid = phys_pkg_id(0);
+
if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
return;
- cpuid(1, &eax, &ebx, &ecx, &edx);
smp_num_siblings = (ebx & 0xff0000) >> 16;
-
+
if (smp_num_siblings == 1) {
printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
- } else if (smp_num_siblings > 1) {
- index_msb = 31;
- /*
- * At this point we only support two siblings per
- * processor package.
- */
+ } else if (smp_num_siblings > 1 ) {
+
if (smp_num_siblings > NR_CPUS) {
printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
smp_num_siblings = 1;
return;
}
- tmp = smp_num_siblings;
- while ((tmp & 0x80000000 ) == 0) {
- tmp <<=1 ;
- index_msb--;
- }
- if (smp_num_siblings & (smp_num_siblings - 1))
- index_msb++;
+
+ index_msb = get_count_order(smp_num_siblings);
phys_proc_id[cpu] = phys_pkg_id(index_msb);
-
+
printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
phys_proc_id[cpu]);
- smp_num_siblings = smp_num_siblings / c->x86_num_cores;
+ smp_num_siblings = smp_num_siblings / c->x86_max_cores;
- tmp = smp_num_siblings;
- index_msb = 31;
- while ((tmp & 0x80000000) == 0) {
- tmp <<=1 ;
- index_msb--;
- }
- if (smp_num_siblings & (smp_num_siblings - 1))
- index_msb++;
+ index_msb = get_count_order(smp_num_siblings) ;
- cpu_core_id[cpu] = phys_pkg_id(index_msb);
+ core_bits = get_count_order(c->x86_max_cores);
- if (c->x86_num_cores > 1)
+ cpu_core_id[cpu] = phys_pkg_id(index_msb) &
+ ((1 << core_bits) - 1);
+
+ if (c->x86_max_cores > 1)
printk(KERN_INFO "CPU: Processor Core ID: %d\n",
cpu_core_id[cpu]);
}
@@ -975,7 +964,7 @@ static void srat_detect_node(void)
node = apicid_to_node[hard_smp_processor_id()];
if (node == NUMA_NO_NODE)
node = 0;
- cpu_to_node[cpu] = node;
+ numa_set_node(cpu, node);
if (acpi_numa > 0)
printk(KERN_INFO "CPU %d -> Node %d\n", cpu, node);
@@ -993,13 +982,18 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
unsigned eax = cpuid_eax(0x80000008);
c->x86_virt_bits = (eax >> 8) & 0xff;
c->x86_phys_bits = eax & 0xff;
+ /* CPUID workaround for Intel 0F34 CPU */
+ if (c->x86_vendor == X86_VENDOR_INTEL &&
+ c->x86 == 0xF && c->x86_model == 0x3 &&
+ c->x86_mask == 0x4)
+ c->x86_phys_bits = 36;
}
if (c->x86 == 15)
c->x86_cache_alignment = c->x86_clflush_size * 2;
if (c->x86 >= 15)
set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
- c->x86_num_cores = intel_num_cpu_cores(c);
+ c->x86_max_cores = intel_num_cpu_cores(c);
srat_detect_node();
}
@@ -1037,7 +1031,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
c->x86_model_id[0] = '\0'; /* Unset */
c->x86_clflush_size = 64;
c->x86_cache_alignment = c->x86_clflush_size;
- c->x86_num_cores = 1;
+ c->x86_max_cores = 1;
c->extended_cpuid_level = 0;
memset(&c->x86_capability, 0, sizeof c->x86_capability);
@@ -1060,10 +1054,10 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
c->x86 = (tfms >> 8) & 0xf;
c->x86_model = (tfms >> 4) & 0xf;
c->x86_mask = tfms & 0xf;
- if (c->x86 == 0xf) {
+ if (c->x86 == 0xf)
c->x86 += (tfms >> 20) & 0xff;
+ if (c->x86 >= 0x6)
c->x86_model += ((tfms >> 16) & 0xF) << 4;
- }
if (c->x86_capability[0] & (1<<19))
c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
} else {
@@ -1271,13 +1265,12 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
#ifdef CONFIG_SMP
- if (smp_num_siblings * c->x86_num_cores > 1) {
+ if (smp_num_siblings * c->x86_max_cores > 1) {
int cpu = c - cpu_data;
seq_printf(m, "physical id\t: %d\n", phys_proc_id[cpu]);
- seq_printf(m, "siblings\t: %d\n",
- c->x86_num_cores * smp_num_siblings);
+ seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
seq_printf(m, "core id\t\t: %d\n", cpu_core_id[cpu]);
- seq_printf(m, "cpu cores\t: %d\n", c->x86_num_cores);
+ seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
}
#endif
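The detect_ht() rewrite replaces two open-coded MSB scans with get_count_order(), which returns the order of the smallest power of two greater than or equal to its argument. A portable sketch of that contract (the exact kernel implementation may differ):

#include <assert.h>

static int get_count_order(unsigned int n)
{
	int order = 0;
	while ((1u << order) < n)
		order++;
	return order;
}

int main(void)
{
	assert(get_count_order(1) == 0);
	assert(get_count_order(2) == 1);
	assert(get_count_order(3) == 2); /* rounds up for non-powers of two */
	assert(get_count_order(8) == 3);
	return 0;
}
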
diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c
index 79190891fbc..06dc354375c 100644
--- a/arch/x86_64/kernel/setup64.c
+++ b/arch/x86_64/kernel/setup64.c
@@ -141,7 +141,6 @@ void pda_init(int cpu)
panic("cannot allocate irqstack for cpu %d", cpu);
}
- asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
pda->irqstackptr += IRQSTACKSIZE-64;
}
@@ -197,6 +196,7 @@ void __cpuinit cpu_init (void)
/* CPU 0 is initialised in head64.c */
if (cpu != 0) {
pda_init(cpu);
+ zap_low_mappings(cpu);
} else
estacks = boot_exception_stacks;
diff --git a/arch/x86_64/kernel/signal.c b/arch/x86_64/kernel/signal.c
index d642fbf3da2..5876df116c9 100644
--- a/arch/x86_64/kernel/signal.c
+++ b/arch/x86_64/kernel/signal.c
@@ -110,6 +110,15 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, unsigned
COPY(r14);
COPY(r15);
+ /* Kernel saves and restores only the CS segment register on signals,
+ * which is the bare minimum needed to allow mixed 32/64-bit code.
+ * App's signal handler can save/restore other segments if needed. */
+ {
+ unsigned cs;
+ err |= __get_user(cs, &sc->cs);
+ regs->cs = cs | 3; /* Force into user mode */
+ }
+
{
unsigned int tmpflags;
err |= __get_user(tmpflags, &sc->eflags);
@@ -187,6 +196,7 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned lo
{
int err = 0;
+ err |= __put_user(regs->cs, &sc->cs);
err |= __put_user(0, &sc->gs);
err |= __put_user(0, &sc->fs);
@@ -318,7 +328,14 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->rsp = (unsigned long)frame;
+ /* Set up the CS register to run signal handlers in 64-bit mode,
+ even if the handler happens to be interrupting 32-bit code. */
+ regs->cs = __USER_CS;
+
+ /* This, by contrast, has nothing to do with segment registers -
+ see include/asm-x86_64/uaccess.h for details. */
set_fs(USER_DS);
+
regs->eflags &= ~TF_MASK;
if (test_thread_flag(TIF_SINGLESTEP))
ptrace_notify(SIGTRAP);
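
The `| 3` in the restore path above is the whole security story for the
restored selector: the low two bits of CS are the Requested Privilege
Level. A tiny sketch of just that bit trick; the 0x30 input is made up,
and 0x33 happens to be x86_64's usual __USER_CS value (an assumption
here, not taken from this patch):

    #include <stdio.h>

    int main(void)
    {
            /* Low two selector bits = RPL. OR-ing in 3 pins any
               restored CS to ring 3, so a forged sigcontext can
               never name a usable ring-0 selector. */
            unsigned cs = 0x30;     /* invented attacker-chosen value */
            cs |= 3;                /* force user mode, as above */
            printf("cs=%#x rpl=%u\n", cs, cs & 3);
            return 0;
    }
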
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index 9db9dda161b..cfc3d9dccbd 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -28,8 +28,6 @@
#include <asm/proto.h>
#include <asm/apicdef.h>
-#define __cpuinit __init
-
/*
* Smarter SMP flushing macros.
* c/o Linus Torvalds.
@@ -452,13 +450,14 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
void smp_stop_cpu(void)
{
+ unsigned long flags;
/*
* Remove this CPU:
*/
cpu_clear(smp_processor_id(), cpu_online_map);
- local_irq_disable();
+ local_irq_save(flags);
disable_local_APIC();
- local_irq_enable();
+ local_irq_restore(flags);
}
static void smp_really_stop_cpu(void *dummy)
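
The switch from a bare disable/enable pair to save/restore matters when
the caller already holds interrupts off: an unconditional
local_irq_enable() would silently turn them back on. A toy model, with
the kernel macros mocked as plain functions for illustration:

    #include <stdio.h>
    #include <stdbool.h>

    /* Mock IRQ flag; assume the caller already disabled IRQs. */
    static bool irqs_on = false;

    static void local_irq_save(unsigned long *f)
    {
            *f = irqs_on;           /* remember previous state */
            irqs_on = false;
    }

    static void local_irq_restore(unsigned long f)
    {
            irqs_on = f;            /* put it back, whatever it was */
    }

    int main(void)
    {
            unsigned long flags;
            local_irq_save(&flags);
            /* ... critical section ... */
            local_irq_restore(flags);
            printf("irqs_on=%d\n", irqs_on);  /* still 0: preserved */
            return 0;
    }
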
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index c4e59bbdc18..683c33f7b96 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -64,6 +64,7 @@
int smp_num_siblings = 1;
/* Package ID of each logical CPU */
u8 phys_proc_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
+/* Core ID of each logical CPU */
u8 cpu_core_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
/* Bitmask of currently online CPUs */
@@ -87,7 +88,10 @@ struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
/* Set when the idlers are all forked */
int smp_threads_ready;
+/* representing HT siblings of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
+
+/* representing HT and core siblings of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);
@@ -434,30 +438,59 @@ void __cpuinit smp_callin(void)
cpu_set(cpuid, cpu_callin_map);
}
+/* representing cpus for which sibling maps can be computed */
+static cpumask_t cpu_sibling_setup_map;
+
static inline void set_cpu_sibling_map(int cpu)
{
int i;
+ struct cpuinfo_x86 *c = cpu_data;
+
+ cpu_set(cpu, cpu_sibling_setup_map);
if (smp_num_siblings > 1) {
- for_each_cpu(i) {
- if (cpu_core_id[cpu] == cpu_core_id[i]) {
+ for_each_cpu_mask(i, cpu_sibling_setup_map) {
+ if (phys_proc_id[cpu] == phys_proc_id[i] &&
+ cpu_core_id[cpu] == cpu_core_id[i]) {
cpu_set(i, cpu_sibling_map[cpu]);
cpu_set(cpu, cpu_sibling_map[i]);
+ cpu_set(i, cpu_core_map[cpu]);
+ cpu_set(cpu, cpu_core_map[i]);
}
}
} else {
cpu_set(cpu, cpu_sibling_map[cpu]);
}
- if (current_cpu_data.x86_num_cores > 1) {
- for_each_cpu(i) {
- if (phys_proc_id[cpu] == phys_proc_id[i]) {
- cpu_set(i, cpu_core_map[cpu]);
- cpu_set(cpu, cpu_core_map[i]);
- }
- }
- } else {
+ if (current_cpu_data.x86_max_cores == 1) {
cpu_core_map[cpu] = cpu_sibling_map[cpu];
+ c[cpu].booted_cores = 1;
+ return;
+ }
+
+ for_each_cpu_mask(i, cpu_sibling_setup_map) {
+ if (phys_proc_id[cpu] == phys_proc_id[i]) {
+ cpu_set(i, cpu_core_map[cpu]);
+ cpu_set(cpu, cpu_core_map[i]);
+ /*
+			 * Does this new cpu bring up a new core?
+ */
+ if (cpus_weight(cpu_sibling_map[cpu]) == 1) {
+ /*
+ * for each core in package, increment
+ * the booted_cores for this new cpu
+ */
+ if (first_cpu(cpu_sibling_map[i]) == i)
+ c[cpu].booted_cores++;
+ /*
+ * increment the core count for all
+ * the other cpus in this package
+ */
+ if (i != cpu)
+ c[i].booted_cores++;
+ } else if (i != cpu && !c[cpu].booted_cores)
+ c[cpu].booted_cores = c[i].booted_cores;
+ }
}
}
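
As far as I can tell, the net effect of the bookkeeping above is an
invariant: c[cpu].booted_cores equals the number of distinct cores
currently online in cpu's package. A toy replay under that reading,
with an invented topology of one package and HT pairs (0,1) and (2,3):

    #include <stdio.h>

    #define NCPU 4
    static int core_of[NCPU] = { 0, 0, 1, 1 };  /* cpu -> core id */

    int main(void)
    {
            int core_online[NCPU] = { 0 };
            for (int cpu = 0; cpu < NCPU; cpu++) {
                    int booted = 0;
                    core_online[core_of[cpu]] = 1;
                    for (int c = 0; c < NCPU; c++)
                            booted += core_online[c];
                    /* prints 1, 1, 2, 2 as the second core appears */
                    printf("cpu%d up: booted_cores=%d\n", cpu, booted);
            }
            return 0;
    }
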
@@ -879,6 +912,9 @@ static __init void disable_smp(void)
}
#ifdef CONFIG_HOTPLUG_CPU
+
+int additional_cpus __initdata = -1;
+
/*
* cpu_possible_map should be static, it cannot change as cpu's
* are onlined, or offlined. The reason is per-cpu data-structures
@@ -887,14 +923,38 @@ static __init void disable_smp(void)
* cpu_present_map on the other hand can change dynamically.
* In case when cpu_hotplug is not compiled, then we resort to current
* behaviour, which is cpu_possible == cpu_present.
- * If cpu-hotplug is supported, then we need to preallocate for all
- * those NR_CPUS, hence cpu_possible_map represents entire NR_CPUS range.
* - Ashok Raj
+ *
+ * Three ways to find out the number of additional hotplug CPUs:
+ * - If the BIOS specified disabled CPUs in ACPI/mptables, use that.
+ * - Otherwise use half of the available CPUs, or 2, whichever is more.
+ * - The user can override this with additional_cpus=NUM.
+ * We do this because additional CPUs waste a lot of memory.
+ * -AK
*/
__init void prefill_possible_map(void)
{
int i;
- for (i = 0; i < NR_CPUS; i++)
+ int possible;
+
+ if (additional_cpus == -1) {
+ if (disabled_cpus > 0) {
+ additional_cpus = disabled_cpus;
+ } else {
+ additional_cpus = num_processors / 2;
+ if (additional_cpus == 0)
+ additional_cpus = 2;
+ }
+ }
+ possible = num_processors + additional_cpus;
+ if (possible > NR_CPUS)
+ possible = NR_CPUS;
+
+ printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
+ possible,
+ max_t(int, possible - num_processors, 0));
+
+ for (i = 0; i < possible; i++)
cpu_set(i, cpu_possible_map);
}
#endif
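
Plugging numbers into the heuristic above makes the sizing concrete
(all inputs invented): a 6-CPU box with no BIOS-disabled CPUs gets
6/2 = 3 hotplug slots, so 9 possible CPUs.

    #include <stdio.h>

    #define NR_CPUS 32      /* stand-in for the kernel config value */

    int main(void)
    {
            int num_processors = 6, disabled_cpus = 0;
            int additional_cpus = -1;   /* no additional_cpus= given */

            if (additional_cpus == -1) {
                    if (disabled_cpus > 0) {
                            additional_cpus = disabled_cpus;
                    } else {
                            additional_cpus = num_processors / 2;
                            if (additional_cpus == 0)
                                    additional_cpus = 2;
                    }
            }
            int possible = num_processors + additional_cpus;
            if (possible > NR_CPUS)
                    possible = NR_CPUS;
            printf("SMP: Allowing %d CPUs, %d hotplug CPUs\n",
                   possible, possible - num_processors);
            return 0;
    }
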
@@ -965,6 +1025,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
nmi_watchdog_default();
current_cpu_data = boot_cpu_data;
current_thread_info()->cpu = 0; /* needed? */
+ set_cpu_sibling_map(0);
if (smp_sanity_check(max_cpus) < 0) {
printk(KERN_INFO "SMP disabled\n");
@@ -1008,8 +1069,6 @@ void __init smp_prepare_boot_cpu(void)
int me = smp_processor_id();
cpu_set(me, cpu_online_map);
cpu_set(me, cpu_callout_map);
- cpu_set(0, cpu_sibling_map[0]);
- cpu_set(0, cpu_core_map[0]);
per_cpu(cpu_state, me) = CPU_ONLINE;
}
@@ -1062,9 +1121,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
*/
void __init smp_cpus_done(unsigned int max_cpus)
{
-#ifndef CONFIG_HOTPLUG_CPU
- zap_low_mappings();
-#endif
smp_cleanup_boot();
#ifdef CONFIG_X86_IO_APIC
@@ -1081,15 +1137,24 @@ void __init smp_cpus_done(unsigned int max_cpus)
static void remove_siblinginfo(int cpu)
{
int sibling;
+ struct cpuinfo_x86 *c = cpu_data;
+ for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
+ cpu_clear(cpu, cpu_core_map[sibling]);
+ /*
+		 * the last thread sibling in this cpu core is going down
+ */
+ if (cpus_weight(cpu_sibling_map[cpu]) == 1)
+ c[sibling].booted_cores--;
+ }
+
for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
cpu_clear(cpu, cpu_sibling_map[sibling]);
- for_each_cpu_mask(sibling, cpu_core_map[cpu])
- cpu_clear(cpu, cpu_core_map[sibling]);
cpus_clear(cpu_sibling_map[cpu]);
cpus_clear(cpu_core_map[cpu]);
phys_proc_id[cpu] = BAD_APICID;
cpu_core_id[cpu] = BAD_APICID;
+ cpu_clear(cpu, cpu_sibling_setup_map);
}
void remove_cpu_from_maps(void)
@@ -1153,6 +1218,12 @@ void __cpu_die(unsigned int cpu)
printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
+static __init int setup_additional_cpus(char *s)
+{
+ return get_option(&s, &additional_cpus);
+}
+__setup("additional_cpus=", setup_additional_cpus);
+
#else /* ... !CONFIG_HOTPLUG_CPU */
int __cpu_disable(void)
diff --git a/arch/x86_64/kernel/sys_x86_64.c b/arch/x86_64/kernel/sys_x86_64.c
index cc7821c6885..6449ea8fe75 100644
--- a/arch/x86_64/kernel/sys_x86_64.c
+++ b/arch/x86_64/kernel/sys_x86_64.c
@@ -154,17 +154,3 @@ asmlinkage long sys_uname(struct new_utsname __user * name)
err |= copy_to_user(&name->machine, "i686", 5);
return err ? -EFAULT : 0;
}
-
-asmlinkage long sys_time64(long __user * tloc)
-{
- struct timeval now;
- int i;
-
- do_gettimeofday(&now);
- i = now.tv_sec;
- if (tloc) {
- if (put_user(i,tloc))
- i = -EFAULT;
- }
- return i;
-}
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index b5e09e6b553..bf337f49318 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -428,19 +428,6 @@ static void __kprobes do_trap(int trapnr, int signr, char *str,
{
conditional_sti(regs);
-#ifdef CONFIG_CHECKING
- {
- unsigned long gs;
- struct x8664_pda *pda = cpu_pda + safe_smp_processor_id();
- rdmsrl(MSR_GS_BASE, gs);
- if (gs != (unsigned long)pda) {
- wrmsrl(MSR_GS_BASE, pda);
- printk("%s: wrong gs %lx expected %p rip %lx\n", str, gs, pda,
- regs->rip);
- }
- }
-#endif
-
if (user_mode(regs)) {
struct task_struct *tsk = current;
@@ -513,20 +500,6 @@ asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
{
conditional_sti(regs);
-#ifdef CONFIG_CHECKING
- {
- unsigned long gs;
- struct x8664_pda *pda = cpu_pda + safe_smp_processor_id();
- rdmsrl(MSR_GS_BASE, gs);
- if (gs != (unsigned long)pda) {
- wrmsrl(MSR_GS_BASE, pda);
- oops_in_progress++;
- printk("general protection handler: wrong gs %lx expected %p\n", gs, pda);
- oops_in_progress--;
- }
- }
-#endif
-
if (user_mode(regs)) {
struct task_struct *tsk = current;
@@ -665,19 +638,6 @@ asmlinkage void __kprobes do_debug(struct pt_regs * regs,
struct task_struct *tsk = current;
siginfo_t info;
-#ifdef CONFIG_CHECKING
- {
- /* RED-PEN interaction with debugger - could destroy gs */
- unsigned long gs;
- struct x8664_pda *pda = cpu_pda + safe_smp_processor_id();
- rdmsrl(MSR_GS_BASE, gs);
- if (gs != (unsigned long)pda) {
- wrmsrl(MSR_GS_BASE, pda);
- printk("debug handler: wrong gs %lx expected %p\n", gs, pda);
- }
- }
-#endif
-
get_debugreg(condition, 6);
if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
@@ -888,6 +848,10 @@ asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}
+asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
+{
+}
+
/*
* 'math_state_restore()' saves the current math information in the
* old math state array, and gets the new ones from the current task
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index 6dd642cad2e..58b19215b4b 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -50,7 +50,7 @@ SECTIONS
*(.bss.page_aligned)
*(.bss)
}
- __bss_end = .;
+ __bss_stop = .;
. = ALIGN(PAGE_SIZE);
. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
diff --git a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c
index fd99ddd009b..4a54221e10b 100644
--- a/arch/x86_64/kernel/x8664_ksyms.c
+++ b/arch/x86_64/kernel/x8664_ksyms.c
@@ -203,3 +203,6 @@ EXPORT_SYMBOL(flush_tlb_page);
#endif
EXPORT_SYMBOL(cpu_khz);
+
+EXPORT_SYMBOL(load_gs_index);
+
diff --git a/arch/x86_64/lib/clear_page.S b/arch/x86_64/lib/clear_page.S
index 30a9da458c1..43d9fa13618 100644
--- a/arch/x86_64/lib/clear_page.S
+++ b/arch/x86_64/lib/clear_page.S
@@ -5,46 +5,8 @@
.globl clear_page
.p2align 4
clear_page:
- xorl %eax,%eax
- movl $4096/64,%ecx
- .p2align 4
-.Lloop:
- decl %ecx
-#define PUT(x) movq %rax,x*8(%rdi)
- movq %rax,(%rdi)
- PUT(1)
- PUT(2)
- PUT(3)
- PUT(4)
- PUT(5)
- PUT(6)
- PUT(7)
- leaq 64(%rdi),%rdi
- jnz .Lloop
- nop
- ret
-clear_page_end:
-
- /* C stepping K8 run faster using the string instructions.
- It is also a lot simpler. Use this when possible */
-
-#include <asm/cpufeature.h>
-
- .section .altinstructions,"a"
- .align 8
- .quad clear_page
- .quad clear_page_c
- .byte X86_FEATURE_K8_C
- .byte clear_page_end-clear_page
- .byte clear_page_c_end-clear_page_c
- .previous
-
- .section .altinstr_replacement,"ax"
-clear_page_c:
movl $4096/8,%ecx
xorl %eax,%eax
rep
stosq
ret
-clear_page_c_end:
- .previous
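
After this patch the string-instruction body is the only clear_page.
In rough C it amounts to nothing more than the following; this is a
reading aid only, the kernel version stays in assembly:

    #include <stdint.h>
    #include <stdio.h>

    /* What 'movl $4096/8,%ecx; xorl %eax,%eax; rep stosq' does:
       store 512 zero quadwords starting at the page address. */
    static void clear_page_c(void *page)
    {
            uint64_t *p = page;
            for (int i = 0; i < 4096 / 8; i++)
                    p[i] = 0;
    }

    int main(void)
    {
            static uint64_t page[512] = { [0] = ~0ull };
            clear_page_c(page);
            printf("page[0]=%llu\n", (unsigned long long)page[0]);
            return 0;
    }
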
diff --git a/arch/x86_64/lib/copy_page.S b/arch/x86_64/lib/copy_page.S
index dd3aa47b6bf..621a1976940 100644
--- a/arch/x86_64/lib/copy_page.S
+++ b/arch/x86_64/lib/copy_page.S
@@ -8,94 +8,7 @@
.globl copy_page
.p2align 4
copy_page:
- subq $3*8,%rsp
- movq %rbx,(%rsp)
- movq %r12,1*8(%rsp)
- movq %r13,2*8(%rsp)
-
- movl $(4096/64)-5,%ecx
- .p2align 4
-.Loop64:
- dec %rcx
-
- movq (%rsi), %rax
- movq 8 (%rsi), %rbx
- movq 16 (%rsi), %rdx
- movq 24 (%rsi), %r8
- movq 32 (%rsi), %r9
- movq 40 (%rsi), %r10
- movq 48 (%rsi), %r11
- movq 56 (%rsi), %r12
-
- prefetcht0 5*64(%rsi)
-
- movq %rax, (%rdi)
- movq %rbx, 8 (%rdi)
- movq %rdx, 16 (%rdi)
- movq %r8, 24 (%rdi)
- movq %r9, 32 (%rdi)
- movq %r10, 40 (%rdi)
- movq %r11, 48 (%rdi)
- movq %r12, 56 (%rdi)
-
- leaq 64 (%rsi), %rsi
- leaq 64 (%rdi), %rdi
-
- jnz .Loop64
-
- movl $5,%ecx
- .p2align 4
-.Loop2:
- decl %ecx
-
- movq (%rsi), %rax
- movq 8 (%rsi), %rbx
- movq 16 (%rsi), %rdx
- movq 24 (%rsi), %r8
- movq 32 (%rsi), %r9
- movq 40 (%rsi), %r10
- movq 48 (%rsi), %r11
- movq 56 (%rsi), %r12
-
- movq %rax, (%rdi)
- movq %rbx, 8 (%rdi)
- movq %rdx, 16 (%rdi)
- movq %r8, 24 (%rdi)
- movq %r9, 32 (%rdi)
- movq %r10, 40 (%rdi)
- movq %r11, 48 (%rdi)
- movq %r12, 56 (%rdi)
-
- leaq 64(%rdi),%rdi
- leaq 64(%rsi),%rsi
-
- jnz .Loop2
-
- movq (%rsp),%rbx
- movq 1*8(%rsp),%r12
- movq 2*8(%rsp),%r13
- addq $3*8,%rsp
- ret
-
- /* C stepping K8 run faster using the string copy instructions.
- It is also a lot simpler. Use this when possible */
-
-#include <asm/cpufeature.h>
-
- .section .altinstructions,"a"
- .align 8
- .quad copy_page
- .quad copy_page_c
- .byte X86_FEATURE_K8_C
- .byte copy_page_c_end-copy_page_c
- .byte copy_page_c_end-copy_page_c
- .previous
-
- .section .altinstr_replacement,"ax"
-copy_page_c:
movl $4096/8,%ecx
rep
movsq
ret
-copy_page_c_end:
- .previous
diff --git a/arch/x86_64/lib/memcpy.S b/arch/x86_64/lib/memcpy.S
index c6c46494fef..92dd8054460 100644
--- a/arch/x86_64/lib/memcpy.S
+++ b/arch/x86_64/lib/memcpy.S
@@ -11,6 +11,8 @@
*
* Output:
* rax original destination
+ *
+ * TODO: check best memcpy for PSC
*/
.globl __memcpy
@@ -18,95 +20,6 @@
.p2align 4
__memcpy:
memcpy:
- pushq %rbx
- movq %rdi,%rax
-
- movl %edx,%ecx
- shrl $6,%ecx
- jz .Lhandle_tail
-
- .p2align 4
-.Lloop_64:
- decl %ecx
-
- movq (%rsi),%r11
- movq 8(%rsi),%r8
-
- movq %r11,(%rdi)
- movq %r8,1*8(%rdi)
-
- movq 2*8(%rsi),%r9
- movq 3*8(%rsi),%r10
-
- movq %r9,2*8(%rdi)
- movq %r10,3*8(%rdi)
-
- movq 4*8(%rsi),%r11
- movq 5*8(%rsi),%r8
-
- movq %r11,4*8(%rdi)
- movq %r8,5*8(%rdi)
-
- movq 6*8(%rsi),%r9
- movq 7*8(%rsi),%r10
-
- movq %r9,6*8(%rdi)
- movq %r10,7*8(%rdi)
-
- leaq 64(%rsi),%rsi
- leaq 64(%rdi),%rdi
- jnz .Lloop_64
-
-.Lhandle_tail:
- movl %edx,%ecx
- andl $63,%ecx
- shrl $3,%ecx
- jz .Lhandle_7
- .p2align 4
-.Lloop_8:
- decl %ecx
- movq (%rsi),%r8
- movq %r8,(%rdi)
- leaq 8(%rdi),%rdi
- leaq 8(%rsi),%rsi
- jnz .Lloop_8
-
-.Lhandle_7:
- movl %edx,%ecx
- andl $7,%ecx
- jz .Lende
- .p2align 4
-.Lloop_1:
- movb (%rsi),%r8b
- movb %r8b,(%rdi)
- incq %rdi
- incq %rsi
- decl %ecx
- jnz .Lloop_1
-
-.Lende:
- popq %rbx
- ret
-.Lfinal:
-
- /* C stepping K8 run faster using the string copy instructions.
- It is also a lot simpler. Use this when possible */
-
- .section .altinstructions,"a"
- .align 8
- .quad memcpy
- .quad memcpy_c
- .byte X86_FEATURE_K8_C
- .byte .Lfinal-memcpy
- .byte memcpy_c_end-memcpy_c
- .previous
-
- .section .altinstr_replacement,"ax"
- /* rdi destination
- * rsi source
- * rdx count
- */
-memcpy_c:
movq %rdi,%rax
movl %edx,%ecx
shrl $3,%ecx
@@ -117,5 +30,3 @@ memcpy_c:
rep
movsb
ret
-memcpy_c_end:
- .previous
diff --git a/arch/x86_64/lib/memset.S b/arch/x86_64/lib/memset.S
index 4b4c4063864..2aa48f24ed1 100644
--- a/arch/x86_64/lib/memset.S
+++ b/arch/x86_64/lib/memset.S
@@ -13,98 +13,6 @@
.p2align 4
memset:
__memset:
- movq %rdi,%r10
- movq %rdx,%r11
-
- /* expand byte value */
- movzbl %sil,%ecx
- movabs $0x0101010101010101,%rax
- mul %rcx /* with rax, clobbers rdx */
-
- /* align dst */
- movl %edi,%r9d
- andl $7,%r9d
- jnz .Lbad_alignment
-.Lafter_bad_alignment:
-
- movl %r11d,%ecx
- shrl $6,%ecx
- jz .Lhandle_tail
-
- .p2align 4
-.Lloop_64:
- decl %ecx
- movq %rax,(%rdi)
- movq %rax,8(%rdi)
- movq %rax,16(%rdi)
- movq %rax,24(%rdi)
- movq %rax,32(%rdi)
- movq %rax,40(%rdi)
- movq %rax,48(%rdi)
- movq %rax,56(%rdi)
- leaq 64(%rdi),%rdi
- jnz .Lloop_64
-
- /* Handle tail in loops. The loops should be faster than hard
- to predict jump tables. */
- .p2align 4
-.Lhandle_tail:
- movl %r11d,%ecx
- andl $63&(~7),%ecx
- jz .Lhandle_7
- shrl $3,%ecx
- .p2align 4
-.Lloop_8:
- decl %ecx
- movq %rax,(%rdi)
- leaq 8(%rdi),%rdi
- jnz .Lloop_8
-
-.Lhandle_7:
- movl %r11d,%ecx
- andl $7,%ecx
- jz .Lende
- .p2align 4
-.Lloop_1:
- decl %ecx
- movb %al,(%rdi)
- leaq 1(%rdi),%rdi
- jnz .Lloop_1
-
-.Lende:
- movq %r10,%rax
- ret
-
-.Lbad_alignment:
- cmpq $7,%r11
- jbe .Lhandle_7
- movq %rax,(%rdi) /* unaligned store */
- movq $8,%r8
- subq %r9,%r8
- addq %r8,%rdi
- subq %r8,%r11
- jmp .Lafter_bad_alignment
-
- /* C stepping K8 run faster using the string instructions.
- It is also a lot simpler. Use this when possible */
-
-#include <asm/cpufeature.h>
-
- .section .altinstructions,"a"
- .align 8
- .quad memset
- .quad memset_c
- .byte X86_FEATURE_K8_C
- .byte memset_c_end-memset_c
- .byte memset_c_end-memset_c
- .previous
-
- .section .altinstr_replacement,"ax"
- /* rdi destination
- * rsi value
- * rdx count
- */
-memset_c:
movq %rdi,%r9
movl %edx,%r8d
andl $7,%r8d
@@ -121,5 +29,3 @@ memset_c:
stosb
movq %r9,%rax
ret
-memset_c_end:
- .previous
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index b75b872ec15..3a63707a698 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -308,18 +308,6 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
unsigned long flags;
siginfo_t info;
-#ifdef CONFIG_CHECKING
- {
- unsigned long gs;
- struct x8664_pda *pda = cpu_pda + stack_smp_processor_id();
- rdmsrl(MSR_GS_BASE, gs);
- if (gs != (unsigned long)pda) {
- wrmsrl(MSR_GS_BASE, pda);
- printk("page_fault: wrong gs %lx expected %p\n", gs, pda);
- }
- }
-#endif
-
/* get the address */
__asm__("movq %%cr2,%0":"=r" (address));
if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
@@ -571,3 +559,10 @@ do_sigbus:
force_sig_info(SIGBUS, &info, tsk);
return;
}
+
+static int __init enable_pagefaulttrace(char *str)
+{
+ page_fault_trace = 1;
+ return 0;
+}
+__setup("pagefaulttrace", enable_pagefaulttrace);
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index e60a1a848de..286f6a624c3 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -22,6 +22,7 @@
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
+#include <linux/pci.h>
#include <asm/processor.h>
#include <asm/system.h>
@@ -36,16 +37,13 @@
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
+#include <asm/sections.h>
#ifndef Dprintk
#define Dprintk(x...)
#endif
-#ifdef CONFIG_GART_IOMMU
-extern int swiotlb;
-#endif
-
-extern char _stext[];
+static unsigned long dma_reserve __initdata;
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
@@ -86,9 +84,6 @@ void show_mem(void)
/* References to section boundaries */
-extern char _text, _etext, _edata, __bss_start, _end[];
-extern char __init_begin, __init_end;
-
int after_bootmem;
static void *spp_getpage(void)
@@ -308,42 +303,81 @@ void __init init_memory_mapping(unsigned long start, unsigned long end)
table_end<<PAGE_SHIFT);
}
-extern struct x8664_pda cpu_pda[NR_CPUS];
+void __cpuinit zap_low_mappings(int cpu)
+{
+ if (cpu == 0) {
+ pgd_t *pgd = pgd_offset_k(0UL);
+ pgd_clear(pgd);
+ } else {
+ /*
+	 * For APs, zap the low identity mappings by switching cr3
+	 * to init_level4_pgt and doing a local TLB flush
+ */
+ asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
+ }
+ __flush_tlb_all();
+}
-/* Assumes all CPUs still execute in init_mm */
-void zap_low_mappings(void)
+/* Compute zone sizes for the DMA and DMA32 zones in a node. */
+__init void
+size_zones(unsigned long *z, unsigned long *h,
+ unsigned long start_pfn, unsigned long end_pfn)
{
- pgd_t *pgd = pgd_offset_k(0UL);
- pgd_clear(pgd);
- flush_tlb_all();
+ int i;
+ unsigned long w;
+
+ for (i = 0; i < MAX_NR_ZONES; i++)
+ z[i] = 0;
+
+ if (start_pfn < MAX_DMA_PFN)
+ z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
+ if (start_pfn < MAX_DMA32_PFN) {
+ unsigned long dma32_pfn = MAX_DMA32_PFN;
+ if (dma32_pfn > end_pfn)
+ dma32_pfn = end_pfn;
+ z[ZONE_DMA32] = dma32_pfn - start_pfn;
+ }
+ z[ZONE_NORMAL] = end_pfn - start_pfn;
+
+ /* Remove lower zones from higher ones. */
+ w = 0;
+ for (i = 0; i < MAX_NR_ZONES; i++) {
+ if (z[i])
+ z[i] -= w;
+ w += z[i];
+ }
+
+ /* Compute holes */
+ w = 0;
+ for (i = 0; i < MAX_NR_ZONES; i++) {
+ unsigned long s = w;
+ w += z[i];
+ h[i] = e820_hole_size(s, w);
+ }
+
+	/* Add the space needed for mem_map to the holes too. */
+ for (i = 0; i < MAX_NR_ZONES; i++)
+ h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;
+
+ /* The 16MB DMA zone has the kernel and other misc mappings.
+	   Account for them too */
+ if (h[ZONE_DMA]) {
+ h[ZONE_DMA] += dma_reserve;
+ if (h[ZONE_DMA] >= z[ZONE_DMA]) {
+ printk(KERN_WARNING
+ "Kernel too large and filling up ZONE_DMA?\n");
+ h[ZONE_DMA] = z[ZONE_DMA];
+ }
+ }
}
#ifndef CONFIG_NUMA
void __init paging_init(void)
{
- {
- unsigned long zones_size[MAX_NR_ZONES];
- unsigned long holes[MAX_NR_ZONES];
- unsigned int max_dma;
-
- memset(zones_size, 0, sizeof(zones_size));
- memset(holes, 0, sizeof(holes));
-
- max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
-
- if (end_pfn < max_dma) {
- zones_size[ZONE_DMA] = end_pfn;
- holes[ZONE_DMA] = e820_hole_size(0, end_pfn);
- } else {
- zones_size[ZONE_DMA] = max_dma;
- holes[ZONE_DMA] = e820_hole_size(0, max_dma);
- zones_size[ZONE_NORMAL] = end_pfn - max_dma;
- holes[ZONE_NORMAL] = e820_hole_size(max_dma, end_pfn);
- }
- free_area_init_node(0, NODE_DATA(0), zones_size,
- __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
- }
- return;
+ unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];
+ size_zones(zones, holes, 0, end_pfn);
+ free_area_init_node(0, NODE_DATA(0), zones,
+ __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
}
#endif
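
A worked run of the size_zones() arithmetic above, with holes and
dma_reserve left out; the node span (0 to 20GB) is invented:

    #include <stdio.h>

    enum { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, MAX_NR_ZONES };
    #define MAX_DMA_PFN   ((16UL << 20) >> 12)  /* 16MB in 4K pages */
    #define MAX_DMA32_PFN ((4UL << 30) >> 12)   /*  4GB in 4K pages */

    int main(void)
    {
            unsigned long start = 0, end = (20UL << 30) >> 12;
            unsigned long z[MAX_NR_ZONES] = { 0 }, w = 0;

            if (start < MAX_DMA_PFN)
                    z[ZONE_DMA] = MAX_DMA_PFN - start;
            if (start < MAX_DMA32_PFN) {
                    unsigned long dma32 = MAX_DMA32_PFN;
                    if (dma32 > end)
                            dma32 = end;
                    z[ZONE_DMA32] = dma32 - start;
            }
            z[ZONE_NORMAL] = end - start;

            /* Remove lower zones from higher ones, as in the patch. */
            for (int i = 0; i < MAX_NR_ZONES; i++) {
                    if (z[i])
                            z[i] -= w;
                    w += z[i];
            }
            /* DMA=4096 DMA32=1044480 NORMAL=4194304 pages */
            printf("DMA=%lu DMA32=%lu NORMAL=%lu pages\n",
                   z[ZONE_DMA], z[ZONE_DMA32], z[ZONE_NORMAL]);
            return 0;
    }
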
@@ -438,19 +472,16 @@ void __init mem_init(void)
datasize >> 10,
initsize >> 10);
+#ifdef CONFIG_SMP
/*
- * Subtle. SMP is doing its boot stuff late (because it has to
- * fork idle threads) - but it also needs low mappings for the
- * protected-mode entry to work. We zap these entries only after
- * the WP-bit has been tested.
+	 * Sync the boot_level4_pgt mappings with init_level4_pgt,
+	 * except for the low identity mappings, which are already zapped
+	 * in init_level4_pgt. This sync-up is essential for AP bringup
*/
-#ifndef CONFIG_SMP
- zap_low_mappings();
+ memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
#endif
}
-extern char __initdata_begin[], __initdata_end[];
-
void free_initmem(void)
{
unsigned long addr;
@@ -464,7 +495,7 @@ void free_initmem(void)
totalram_pages++;
}
memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin);
- printk ("Freeing unused kernel memory: %luk freed\n", (&__init_end - &__init_begin) >> 10);
+ printk ("Freeing unused kernel memory: %luk freed\n", (__init_end - __init_begin) >> 10);
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -491,6 +522,8 @@ void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
#else
reserve_bootmem(phys, len);
#endif
+ if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
+ dma_reserve += len / PAGE_SIZE;
}
int kern_addr_valid(unsigned long addr)
@@ -532,10 +565,6 @@ extern int exception_trace, page_fault_trace;
static ctl_table debug_table2[] = {
{ 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
proc_dointvec },
-#ifdef CONFIG_CHECKING
- { 100, "page-fault-trace", &page_fault_trace, sizeof(int), 0644, NULL,
- proc_dointvec },
-#endif
{ 0, }
};
diff --git a/arch/x86_64/mm/k8topology.c b/arch/x86_64/mm/k8topology.c
index 65417b040c1..a5663e0bb01 100644
--- a/arch/x86_64/mm/k8topology.c
+++ b/arch/x86_64/mm/k8topology.c
@@ -108,6 +108,7 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
limit >>= 16;
limit <<= 24;
limit |= (1<<24)-1;
+ limit++;
if (limit > end_pfn << PAGE_SHIFT)
limit = end_pfn << PAGE_SHIFT;
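
Why the new limit++ is right: the northbridge limit register names the
last byte of the node, while the surrounding code compares against an
exclusive end. With an invented register value, the increment turns
...FFFFFF into a clean boundary:

    #include <stdio.h>

    int main(void)
    {
            unsigned long limit = 0x00FF0000;   /* raw register field */
            limit >>= 16;
            limit <<= 24;
            limit |= (1UL << 24) - 1;   /* 0xFFFFFFFF: last valid byte */
            limit++;                    /* 0x100000000: exclusive end */
            printf("node end = %#lx\n", limit);
            return 0;
    }
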
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index 21480382100..a828a01739c 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -38,38 +38,57 @@ cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly;
int numa_off __initdata;
-int __init compute_hash_shift(struct node *nodes, int numnodes)
+
+/*
+ * Given a shift value, try to populate memnodemap[]
+ * Returns:
+ * 1 if OK
+ * 0 if memnodemap[] too small (or shift too small)
+ * -1 if node overlap or lost RAM (shift too big)
+ */
+static int __init populate_memnodemap(
+ const struct node *nodes, int numnodes, int shift)
{
int i;
- int shift = 20;
- unsigned long addr,maxend=0;
-
- for (i = 0; i < numnodes; i++)
- if ((nodes[i].start != nodes[i].end) && (nodes[i].end > maxend))
- maxend = nodes[i].end;
+ int res = -1;
+ unsigned long addr, end;
- while ((1UL << shift) < (maxend / NODEMAPSIZE))
- shift++;
-
- printk (KERN_DEBUG"Using %d for the hash shift. Max adder is %lx \n",
- shift,maxend);
- memset(memnodemap,0xff,sizeof(*memnodemap) * NODEMAPSIZE);
+ memset(memnodemap, 0xff, sizeof(memnodemap));
for (i = 0; i < numnodes; i++) {
- if (nodes[i].start == nodes[i].end)
+ addr = nodes[i].start;
+ end = nodes[i].end;
+ if (addr >= end)
continue;
- for (addr = nodes[i].start;
- addr < nodes[i].end;
- addr += (1UL << shift)) {
- if (memnodemap[addr >> shift] != 0xff) {
- printk(KERN_INFO
- "Your memory is not aligned you need to rebuild your kernel "
- "with a bigger NODEMAPSIZE shift=%d adder=%lu\n",
- shift,addr);
+ if ((end >> shift) >= NODEMAPSIZE)
+ return 0;
+ do {
+ if (memnodemap[addr >> shift] != 0xff)
return -1;
- }
memnodemap[addr >> shift] = i;
- }
+ addr += (1 << shift);
+ } while (addr < end);
+ res = 1;
}
+ return res;
+}
+
+int __init compute_hash_shift(struct node *nodes, int numnodes)
+{
+ int shift = 20;
+
+ while (populate_memnodemap(nodes, numnodes, shift + 1) >= 0)
+ shift++;
+
+ printk(KERN_DEBUG "Using %d for the hash shift.\n",
+ shift);
+
+ if (populate_memnodemap(nodes, numnodes, shift) != 1) {
+ printk(KERN_INFO
+ "Your memory is not aligned you need to rebuild your kernel "
+ "with a bigger NODEMAPSIZE shift=%d\n",
+ shift);
+ return -1;
+ }
return shift;
}
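
A self-contained replay of the shift search above; the node layout is
invented, and the helper mirrors the patch with the same 1/0/-1
contract. The loop settles on the coarsest granularity at which every
memnodemap[] bucket still belongs to exactly one node:

    #include <stdio.h>
    #include <string.h>

    #define NODEMAPSIZE 256
    struct node { unsigned long start, end; };  /* [start, end) bytes */

    static unsigned char memnodemap[NODEMAPSIZE];

    static int populate_memnodemap(const struct node *nodes,
                                   int numnodes, int shift)
    {
            int res = -1;
            memset(memnodemap, 0xff, sizeof(memnodemap));
            for (int i = 0; i < numnodes; i++) {
                    unsigned long addr = nodes[i].start;
                    unsigned long end = nodes[i].end;
                    if (addr >= end)
                            continue;
                    if ((end >> shift) >= NODEMAPSIZE)
                            return 0;       /* map too small */
                    do {
                            if (memnodemap[addr >> shift] != 0xff)
                                    return -1;  /* bucket collision */
                            memnodemap[addr >> shift] = i;
                            addr += 1UL << shift;
                    } while (addr < end);
                    res = 1;
            }
            return res;
    }

    int main(void)
    {
            /* Two 2GB nodes back to back (invented layout). */
            struct node nodes[2] = {
                    { 0,         2UL << 30 },
                    { 2UL << 30, 4UL << 30 },
            };
            int shift = 20;
            while (populate_memnodemap(nodes, 2, shift + 1) >= 0)
                    shift++;
            printf("hash shift = %d\n", shift);  /* 31: 2GB buckets */
            return 0;
    }
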
@@ -94,7 +113,6 @@ void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long en
start_pfn = start >> PAGE_SHIFT;
end_pfn = end >> PAGE_SHIFT;
- memory_present(nodeid, start_pfn, end_pfn);
nodedata_phys = find_e820_area(start, end, pgdat_size);
if (nodedata_phys == -1L)
panic("Cannot find memory pgdat in node %d\n", nodeid);
@@ -132,29 +150,14 @@ void __init setup_node_zones(int nodeid)
unsigned long start_pfn, end_pfn;
unsigned long zones[MAX_NR_ZONES];
unsigned long holes[MAX_NR_ZONES];
- unsigned long dma_end_pfn;
- memset(zones, 0, sizeof(unsigned long) * MAX_NR_ZONES);
- memset(holes, 0, sizeof(unsigned long) * MAX_NR_ZONES);
+ start_pfn = node_start_pfn(nodeid);
+ end_pfn = node_end_pfn(nodeid);
- start_pfn = node_start_pfn(nodeid);
- end_pfn = node_end_pfn(nodeid);
+ Dprintk(KERN_INFO "setting up node %d %lx-%lx\n",
+ nodeid, start_pfn, end_pfn);
- Dprintk(KERN_INFO "setting up node %d %lx-%lx\n", nodeid, start_pfn, end_pfn);
-
- /* All nodes > 0 have a zero length zone DMA */
- dma_end_pfn = __pa(MAX_DMA_ADDRESS) >> PAGE_SHIFT;
- if (start_pfn < dma_end_pfn) {
- zones[ZONE_DMA] = dma_end_pfn - start_pfn;
- holes[ZONE_DMA] = e820_hole_size(start_pfn, dma_end_pfn);
- zones[ZONE_NORMAL] = end_pfn - dma_end_pfn;
- holes[ZONE_NORMAL] = e820_hole_size(dma_end_pfn, end_pfn);
-
- } else {
- zones[ZONE_NORMAL] = end_pfn - start_pfn;
- holes[ZONE_NORMAL] = e820_hole_size(start_pfn, end_pfn);
- }
-
+ size_zones(zones, holes, start_pfn, end_pfn);
free_area_init_node(nodeid, NODE_DATA(nodeid), zones,
start_pfn, holes);
}
@@ -171,7 +174,7 @@ void __init numa_init_array(void)
for (i = 0; i < NR_CPUS; i++) {
if (cpu_to_node[i] != NUMA_NO_NODE)
continue;
- cpu_to_node[i] = rr;
+ numa_set_node(i, rr);
rr = next_node(rr, node_online_map);
if (rr == MAX_NUMNODES)
rr = first_node(node_online_map);
@@ -205,8 +208,6 @@ static int numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
if (i == numa_fake-1)
sz = (end_pfn<<PAGE_SHIFT) - nodes[i].start;
nodes[i].end = nodes[i].start + sz;
- if (i != numa_fake-1)
- nodes[i].end--;
printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n",
i,
nodes[i].start, nodes[i].end,
@@ -257,7 +258,7 @@ void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
nodes_clear(node_online_map);
node_set_online(0);
for (i = 0; i < NR_CPUS; i++)
- cpu_to_node[i] = 0;
+ numa_set_node(i, 0);
node_to_cpumask[0] = cpumask_of_cpu(0);
setup_node_bootmem(0, start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
}
@@ -267,6 +268,12 @@ __cpuinit void numa_add_cpu(int cpu)
set_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
}
+void __cpuinit numa_set_node(int cpu, int node)
+{
+ cpu_pda[cpu].nodenumber = node;
+ cpu_to_node[cpu] = node;
+}
+
unsigned long __init numa_free_all_bootmem(void)
{
int i;
@@ -277,9 +284,26 @@ unsigned long __init numa_free_all_bootmem(void)
return pages;
}
+#ifdef CONFIG_SPARSEMEM
+static void __init arch_sparse_init(void)
+{
+ int i;
+
+ for_each_online_node(i)
+ memory_present(i, node_start_pfn(i), node_end_pfn(i));
+
+ sparse_init();
+}
+#else
+#define arch_sparse_init() do {} while (0)
+#endif
+
void __init paging_init(void)
{
int i;
+
+ arch_sparse_init();
+
for_each_online_node(i) {
setup_node_zones(i);
}
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 4b2e844c15a..33340bd1e32 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -71,8 +71,6 @@ static __init void cutoff_node(int i, unsigned long start, unsigned long end)
nd->start = nd->end;
}
if (nd->end > end) {
- if (!(end & 0xfff))
- end--;
nd->end = end;
if (nd->start > nd->end)
nd->start = nd->end;
@@ -166,8 +164,6 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
if (nd->end < end)
nd->end = end;
}
- if (!(nd->end & 0xfff))
- nd->end--;
printk(KERN_INFO "SRAT: Node %u PXM %u %Lx-%Lx\n", node, pxm,
nd->start, nd->end);
}
@@ -203,7 +199,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
if (cpu_to_node[i] == NUMA_NO_NODE)
continue;
if (!node_isset(cpu_to_node[i], nodes_parsed))
- cpu_to_node[i] = NUMA_NO_NODE;
+ numa_set_node(i, NUMA_NO_NODE);
}
numa_init_array();
return 0;