Diffstat (limited to 'arch/x86_64')
-rw-r--r--  arch/x86_64/Kconfig                       61
-rw-r--r--  arch/x86_64/Kconfig.debug                 18
-rw-r--r--  arch/x86_64/Makefile                       4
-rw-r--r--  arch/x86_64/boot/Makefile                  9
-rw-r--r--  arch/x86_64/boot/compressed/misc.c        46
-rw-r--r--  arch/x86_64/boot/setup.S                   1
-rw-r--r--  arch/x86_64/boot/tools/build.c             6
-rw-r--r--  arch/x86_64/boot/video.S                  19
-rw-r--r--  arch/x86_64/crypto/aes-x86_64-asm.S       22
-rw-r--r--  arch/x86_64/crypto/aes.c                  20
-rw-r--r--  arch/x86_64/defconfig                    159
-rw-r--r--  arch/x86_64/ia32/Makefile                  3
-rw-r--r--  arch/x86_64/ia32/audit.c                  11
-rw-r--r--  arch/x86_64/ia32/fpu32.c                   1
-rw-r--r--  arch/x86_64/ia32/ia32_signal.c             2
-rw-r--r--  arch/x86_64/ia32/ia32entry.S              12
-rw-r--r--  arch/x86_64/ia32/ptrace32.c               43
-rw-r--r--  arch/x86_64/ia32/sys_ia32.c               26
-rw-r--r--  arch/x86_64/kernel/Makefile                9
-rw-r--r--  arch/x86_64/kernel/acpi/Makefile           1
-rw-r--r--  arch/x86_64/kernel/acpi/processor.c       72
-rw-r--r--  arch/x86_64/kernel/acpi/sleep.c            8
-rw-r--r--  arch/x86_64/kernel/aperture.c             27
-rw-r--r--  arch/x86_64/kernel/apic.c                 35
-rw-r--r--  arch/x86_64/kernel/asm-offsets.c           3
-rw-r--r--  arch/x86_64/kernel/audit.c                29
-rw-r--r--  arch/x86_64/kernel/crash.c                 6
-rw-r--r--  arch/x86_64/kernel/e820.c                  3
-rw-r--r--  arch/x86_64/kernel/entry.S               116
-rw-r--r--  arch/x86_64/kernel/functionlist            1
-rw-r--r--  arch/x86_64/kernel/genapic.c               1
-rw-r--r--  arch/x86_64/kernel/genapic_cluster.c       1
-rw-r--r--  arch/x86_64/kernel/genapic_flat.c         31
-rw-r--r--  arch/x86_64/kernel/head64.c                2
-rw-r--r--  arch/x86_64/kernel/i387.c                  3
-rw-r--r--  arch/x86_64/kernel/i8259.c                23
-rw-r--r--  arch/x86_64/kernel/io_apic.c              65
-rw-r--r--  arch/x86_64/kernel/irq.c                  48
-rw-r--r--  arch/x86_64/kernel/k8.c                  118
-rw-r--r--  arch/x86_64/kernel/kprobes.c               1
-rw-r--r--  arch/x86_64/kernel/machine_kexec.c         4
-rw-r--r--  arch/x86_64/kernel/mce.c                   6
-rw-r--r--  arch/x86_64/kernel/mce_amd.c             506
-rw-r--r--  arch/x86_64/kernel/module.c               38
-rw-r--r--  arch/x86_64/kernel/mpparse.c               1
-rw-r--r--  arch/x86_64/kernel/nmi.c                  92
-rw-r--r--  arch/x86_64/kernel/pci-calgary.c        1018
-rw-r--r--  arch/x86_64/kernel/pci-dma.c              55
-rw-r--r--  arch/x86_64/kernel/pci-gart.c            156
-rw-r--r--  arch/x86_64/kernel/pci-nommu.c             9
-rw-r--r--  arch/x86_64/kernel/pci-swiotlb.c           2
-rw-r--r--  arch/x86_64/kernel/pmtimer.c               2
-rw-r--r--  arch/x86_64/kernel/process.c              16
-rw-r--r--  arch/x86_64/kernel/reboot.c                1
-rw-r--r--  arch/x86_64/kernel/setup.c               184
-rw-r--r--  arch/x86_64/kernel/setup64.c               4
-rw-r--r--  arch/x86_64/kernel/signal.c                3
-rw-r--r--  arch/x86_64/kernel/smp.c                  16
-rw-r--r--  arch/x86_64/kernel/smpboot.c              32
-rw-r--r--  arch/x86_64/kernel/suspend.c               1
-rw-r--r--  arch/x86_64/kernel/syscall.c               1
-rw-r--r--  arch/x86_64/kernel/tce.c                 202
-rw-r--r--  arch/x86_64/kernel/time.c                 89
-rw-r--r--  arch/x86_64/kernel/traps.c                84
-rw-r--r--  arch/x86_64/kernel/vmlinux.lds.S          30
-rw-r--r--  arch/x86_64/kernel/vsyscall.c              4
-rw-r--r--  arch/x86_64/kernel/x8664_ksyms.c         114
-rw-r--r--  arch/x86_64/lib/csum-partial.c             1
-rw-r--r--  arch/x86_64/lib/csum-wrappers.c            1
-rw-r--r--  arch/x86_64/lib/delay.c                    5
-rw-r--r--  arch/x86_64/lib/memmove.c                  4
-rw-r--r--  arch/x86_64/lib/usercopy.c                13
-rw-r--r--  arch/x86_64/mm/extable.c                   1
-rw-r--r--  arch/x86_64/mm/fault.c                    50
-rw-r--r--  arch/x86_64/mm/init.c                    129
-rw-r--r--  arch/x86_64/mm/ioremap.c                   5
-rw-r--r--  arch/x86_64/mm/mmap.c                      1
-rw-r--r--  arch/x86_64/mm/pageattr.c                  1
-rw-r--r--  arch/x86_64/mm/srat.c                     33
-rw-r--r--  arch/x86_64/pci/k8-bus.c                  10
-rw-r--r--  arch/x86_64/pci/mmconfig.c                13
81 files changed, 2889 insertions, 1114 deletions
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 408d44a5975..e856804c447 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -299,6 +299,7 @@ config X86_64_ACPI_NUMA
bool "ACPI NUMA detection"
depends on NUMA
select ACPI
+ select PCI
select ACPI_NUMA
default y
help
@@ -369,6 +370,8 @@ config HOTPLUG_CPU
can be controlled through /sys/devices/system/cpu/cpu#.
Say N if you want to disable CPU hotplug.
+config ARCH_ENABLE_MEMORY_HOTPLUG
+ def_bool y
config HPET_TIMER
bool
@@ -385,27 +388,47 @@ config HPET_EMULATE_RTC
bool "Provide RTC interrupt"
depends on HPET_TIMER && RTC=y
-config GART_IOMMU
- bool "K8 GART IOMMU support"
+# Mark as embedded because too many people got it wrong.
+# The code disables itself when not needed.
+config IOMMU
+ bool "IOMMU support" if EMBEDDED
default y
select SWIOTLB
+ select AGP
depends on PCI
help
- Support for hardware IOMMU in AMD's Opteron/Athlon64 Processors
- and for the bounce buffering software IOMMU.
- Needed to run systems with more than 3GB of memory properly with
- 32-bit PCI devices that do not support DAC (Double Address Cycle).
- The IOMMU can be turned off at runtime with the iommu=off parameter.
- Normally the kernel will take the right choice by itself.
- This option includes a driver for the AMD Opteron/Athlon64 IOMMU
- northbridge and a software emulation used on other systems without
- hardware IOMMU. If unsure, say Y.
-
-# need this always enabled with GART_IOMMU for the VIA workaround
+ Support for full DMA access of devices with 32bit memory access only
+ on systems with more than 3GB. This is usually needed for USB,
+ sound, many IDE/SATA chipsets and some other devices.
+ Provides a driver for the AMD Athlon64/Opteron/Turion/Sempron GART
+ based IOMMU and a software bounce buffer based IOMMU used on Intel
+ systems and as fallback.
+ The code is only active when needed (enough memory and limited
+ device) unless CONFIG_IOMMU_DEBUG or iommu=force is specified
+ too.
+
+config CALGARY_IOMMU
+ bool "IBM Calgary IOMMU support"
+ default y
+ select SWIOTLB
+ depends on PCI && EXPERIMENTAL
+ help
+ Support for hardware IOMMUs in IBM's xSeries x366 and x460
+ systems. Needed to run systems with more than 3GB of memory
+ properly with 32-bit PCI devices that do not support DAC
+ (Double Address Cycle). Calgary also supports bus level
+ isolation, where all DMAs pass through the IOMMU. This
+ prevents them from going anywhere except their intended
+ destination. This catches hard-to-find kernel bugs and
+ mis-behaving drivers and devices that do not use the DMA-API
+ properly to set up their DMA buffers. The IOMMU can be
+ turned off at boot time with the iommu=off parameter.
+ Normally the kernel will make the right choice by itself.
+ If unsure, say Y.
+
+# need this always selected by IOMMU for the VIA workaround
config SWIOTLB
bool
- default y
- depends on GART_IOMMU
config X86_MCE
bool "Machine check support" if EMBEDDED
@@ -438,10 +461,10 @@ config KEXEC
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
- but it is indepedent of the system firmware. And like a reboot
+ but it is independent of the system firmware. And like a reboot
you can start any kernel with it, not just Linux.
- The name comes from the similiarity to the exec system call.
+ The name comes from the similarity to the exec system call.
It is an ongoing process to be certain the hardware in a machine
is properly shutdown, so do not be surprised if this code does not
@@ -501,6 +524,10 @@ config REORDER
optimal TLB usage. If you have pretty much any version of binutils,
this can increase your kernel build time by roughly one minute.
+config K8_NB
+ def_bool y
+ depends on AGP_AMD64 || IOMMU || (PCI && NUMA)
+
endmenu
#
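
The help texts above describe the effect from a driver's point of view: with CONFIG_IOMMU (GART), CONFIG_CALGARY_IOMMU, or the swiotlb fallback selected, a device limited to 32-bit DMA can still reach buffers above 4GB because the DMA API remaps or bounces them transparently. A minimal, hedged sketch of the driver-side pattern (generic 2.6-era DMA API, not code from this patch; the device and buffer are illustrative):

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	/* Sketch: map a streaming buffer for a 32-bit-only PCI device.
	 * With an IOMMU (or swiotlb bounce buffers) behind the DMA API,
	 * the returned handle fits the device's mask even when 'buf'
	 * itself lives above 4GB. */
	static int demo_map_buffer(struct pci_dev *pdev, void *buf, size_t len)
	{
		dma_addr_t handle;

		if (pci_set_dma_mask(pdev, DMA_32BIT_MASK))
			return -EIO;	/* device cannot even do 32-bit DMA */

		handle = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
		/* ... hand 'handle' to the device, wait for completion ... */
		pci_unmap_single(pdev, handle, len, PCI_DMA_TODEVICE);
		return 0;
	}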
diff --git a/arch/x86_64/Kconfig.debug b/arch/x86_64/Kconfig.debug
index ea31b4c6210..1d92ab56c0f 100644
--- a/arch/x86_64/Kconfig.debug
+++ b/arch/x86_64/Kconfig.debug
@@ -13,7 +13,7 @@ config DEBUG_RODATA
If in doubt, say "N".
config IOMMU_DEBUG
- depends on GART_IOMMU && DEBUG_KERNEL
+ depends on IOMMU && DEBUG_KERNEL
bool "Enable IOMMU debugging"
help
Force the IOMMU to on even when you have less than 4GB of
@@ -35,6 +35,22 @@ config IOMMU_LEAK
Add a simple leak tracer to the IOMMU code. This is useful when you
are debugging a buggy device driver that leaks IOMMU mappings.
+config DEBUG_STACKOVERFLOW
+ bool "Check for stack overflows"
+ depends on DEBUG_KERNEL
+ help
+ This option will cause messages to be printed if free stack space
+ drops below a certain limit.
+
+config DEBUG_STACK_USAGE
+ bool "Stack utilization instrumentation"
+ depends on DEBUG_KERNEL
+ help
+ Enables the display of the minimum amount of free stack which each
+ task has ever had available in the sysrq-T and sysrq-P debug output.
+
+ This option will slow down process creation somewhat.
+
#config X86_REMOTE_DEBUG
# bool "kgdb debugging stub"
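
The "certain limit" in the DEBUG_STACKOVERFLOW help is checked from the interrupt entry path. A hedged sketch of the kind of check this option compiles in (the 128-byte threshold, message, and field names are illustrative of kernels of this era, not quoted from the patch):

	#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Sketch: warn when an interrupt arrives with little kernel stack
	 * left.  The thread stack begins at the thread_info, so an rsp
	 * close to curbase + sizeof(struct thread_info) means the stack
	 * is nearly exhausted. */
	static void stack_overflow_check(struct pt_regs *regs)
	{
		u64 curbase = (u64)current->thread_info;

		if (regs->rsp >= curbase && regs->rsp <= curbase + THREAD_SIZE &&
		    regs->rsp <  curbase + sizeof(struct thread_info) + 128)
			printk("do_IRQ: %s near stack overflow (cur:%Lx,rsp:%Lx)\n",
			       current->comm, curbase, regs->rsp);
	}
	#endif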
diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile
index e573e2ab551..431bb4bc36c 100644
--- a/arch/x86_64/Makefile
+++ b/arch/x86_64/Makefile
@@ -27,6 +27,7 @@ LDFLAGS_vmlinux :=
CHECKFLAGS += -D__x86_64__ -m64
cflags-y :=
+cflags-kernel-y :=
cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
@@ -35,7 +36,7 @@ cflags-y += -m64
cflags-y += -mno-red-zone
cflags-y += -mcmodel=kernel
cflags-y += -pipe
-cflags-$(CONFIG_REORDER) += -ffunction-sections
+cflags-kernel-$(CONFIG_REORDER) += -ffunction-sections
# this makes reading assembly source easier, but produces worse code
# actually it makes the kernel smaller too.
cflags-y += -fno-reorder-blocks
@@ -55,6 +56,7 @@ cflags-y += $(call cc-option,-funit-at-a-time)
cflags-y += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,)
CFLAGS += $(cflags-y)
+CFLAGS_KERNEL += $(cflags-kernel-y)
AFLAGS += -m64
head-y := arch/x86_64/kernel/head.o arch/x86_64/kernel/head64.o arch/x86_64/kernel/init_task.o
diff --git a/arch/x86_64/boot/Makefile b/arch/x86_64/boot/Makefile
index 43ee6c50c27..deb063e7762 100644
--- a/arch/x86_64/boot/Makefile
+++ b/arch/x86_64/boot/Makefile
@@ -107,8 +107,13 @@ fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf
isoimage: $(BOOTIMAGE)
-rm -rf $(obj)/isoimage
mkdir $(obj)/isoimage
- cp `echo /usr/lib*/syslinux/isolinux.bin | awk '{ print $1; }'` \
- $(obj)/isoimage
+ for i in lib lib64 share end ; do \
+ if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
+ cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
+ break ; \
+ fi ; \
+ if [ $$i = end ] ; then exit 1 ; fi ; \
+ done
cp $(BOOTIMAGE) $(obj)/isoimage/linux
echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg
if [ -f '$(FDINITRD)' ] ; then \
diff --git a/arch/x86_64/boot/compressed/misc.c b/arch/x86_64/boot/compressed/misc.c
index cf4b88c416d..3755b2e394d 100644
--- a/arch/x86_64/boot/compressed/misc.c
+++ b/arch/x86_64/boot/compressed/misc.c
@@ -77,11 +77,11 @@ static void gzip_release(void **);
*/
static unsigned char *real_mode; /* Pointer to real-mode data */
-#define EXT_MEM_K (*(unsigned short *)(real_mode + 0x2))
+#define RM_EXT_MEM_K (*(unsigned short *)(real_mode + 0x2))
#ifndef STANDARD_MEMORY_BIOS_CALL
-#define ALT_MEM_K (*(unsigned long *)(real_mode + 0x1e0))
+#define RM_ALT_MEM_K (*(unsigned long *)(real_mode + 0x1e0))
#endif
-#define SCREEN_INFO (*(struct screen_info *)(real_mode+0))
+#define RM_SCREEN_INFO (*(struct screen_info *)(real_mode+0))
extern unsigned char input_data[];
extern int input_len;
@@ -92,9 +92,9 @@ static unsigned long output_ptr = 0;
static void *malloc(int size);
static void free(void *where);
-
-void* memset(void* s, int c, unsigned n);
-void* memcpy(void* dest, const void* src, unsigned n);
+
+static void *memset(void *s, int c, unsigned n);
+static void *memcpy(void *dest, const void *src, unsigned n);
static void putstr(const char *);
@@ -162,8 +162,8 @@ static void putstr(const char *s)
int x,y,pos;
char c;
- x = SCREEN_INFO.orig_x;
- y = SCREEN_INFO.orig_y;
+ x = RM_SCREEN_INFO.orig_x;
+ y = RM_SCREEN_INFO.orig_y;
while ( ( c = *s++ ) != '\0' ) {
if ( c == '\n' ) {
@@ -184,8 +184,8 @@ static void putstr(const char *s)
}
}
- SCREEN_INFO.orig_x = x;
- SCREEN_INFO.orig_y = y;
+ RM_SCREEN_INFO.orig_x = x;
+ RM_SCREEN_INFO.orig_y = y;
pos = (x + cols * y) * 2; /* Update cursor position */
outb_p(14, vidport);
@@ -194,7 +194,7 @@ static void putstr(const char *s)
outb_p(0xff & (pos >> 1), vidport+1);
}
-void* memset(void* s, int c, unsigned n)
+static void* memset(void* s, int c, unsigned n)
{
int i;
char *ss = (char*)s;
@@ -203,7 +203,7 @@ void* memset(void* s, int c, unsigned n)
return s;
}
-void* memcpy(void* dest, const void* src, unsigned n)
+static void* memcpy(void* dest, const void* src, unsigned n)
{
int i;
char *d = (char *)dest, *s = (char *)src;
@@ -278,15 +278,15 @@ static void error(char *x)
putstr(x);
putstr("\n\n -- System halted");
- while(1);
+ while(1); /* Halt */
}
-void setup_normal_output_buffer(void)
+static void setup_normal_output_buffer(void)
{
#ifdef STANDARD_MEMORY_BIOS_CALL
- if (EXT_MEM_K < 1024) error("Less than 2MB of memory");
+ if (RM_EXT_MEM_K < 1024) error("Less than 2MB of memory");
#else
- if ((ALT_MEM_K > EXT_MEM_K ? ALT_MEM_K : EXT_MEM_K) < 1024) error("Less than 2MB of memory");
+ if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < 1024) error("Less than 2MB of memory");
#endif
output_data = (unsigned char *)__PHYSICAL_START; /* Normally Points to 1M */
free_mem_end_ptr = (long)real_mode;
@@ -297,13 +297,13 @@ struct moveparams {
uch *high_buffer_start; int hcount;
};
-void setup_output_buffer_if_we_run_high(struct moveparams *mv)
+static void setup_output_buffer_if_we_run_high(struct moveparams *mv)
{
high_buffer_start = (uch *)(((ulg)&end) + HEAP_SIZE);
#ifdef STANDARD_MEMORY_BIOS_CALL
- if (EXT_MEM_K < (3*1024)) error("Less than 4MB of memory");
+ if (RM_EXT_MEM_K < (3*1024)) error("Less than 4MB of memory");
#else
- if ((ALT_MEM_K > EXT_MEM_K ? ALT_MEM_K : EXT_MEM_K) < (3*1024)) error("Less than 4MB of memory");
+ if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < (3*1024)) error("Less than 4MB of memory");
#endif
mv->low_buffer_start = output_data = (unsigned char *)LOW_BUFFER_START;
low_buffer_end = ((unsigned int)real_mode > LOW_BUFFER_MAX
@@ -319,7 +319,7 @@ void setup_output_buffer_if_we_run_high(struct moveparams *mv)
mv->high_buffer_start = high_buffer_start;
}
-void close_output_buffer_if_we_run_high(struct moveparams *mv)
+static void close_output_buffer_if_we_run_high(struct moveparams *mv)
{
if (bytes_out > low_buffer_size) {
mv->lcount = low_buffer_size;
@@ -335,7 +335,7 @@ int decompress_kernel(struct moveparams *mv, void *rmode)
{
real_mode = rmode;
- if (SCREEN_INFO.orig_video_mode == 7) {
+ if (RM_SCREEN_INFO.orig_video_mode == 7) {
vidmem = (char *) 0xb0000;
vidport = 0x3b4;
} else {
@@ -343,8 +343,8 @@ int decompress_kernel(struct moveparams *mv, void *rmode)
vidport = 0x3d4;
}
- lines = SCREEN_INFO.orig_video_lines;
- cols = SCREEN_INFO.orig_video_cols;
+ lines = RM_SCREEN_INFO.orig_video_lines;
+ cols = RM_SCREEN_INFO.orig_video_cols;
if (free_mem_ptr < 0x100000) setup_normal_output_buffer();
else setup_output_buffer_if_we_run_high(mv);
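
The renames above are mechanical: the real-mode data block left by the boot loader sits at 'real_mode', and each RM_* macro overlays a type at a fixed, documented offset; the RM_ prefix avoids clashes with identically named macros elsewhere, and making memset/memcpy static keeps the decompressor from exporting symbols that collide with the kernel proper. A hedged restatement of the accessor pattern (RM_FIELD and ext_mem_kb are hypothetical helpers, not from the patch):

	/* Sketch: the pattern behind the RM_* macros - read a field of the
	 * real-mode data block by overlaying a type at its offset. */
	#define RM_FIELD(type, offset)	(*(type *)(real_mode + (offset)))

	static unsigned ext_mem_kb(void)
	{
		return RM_FIELD(unsigned short, 0x2);	/* == RM_EXT_MEM_K */
	}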
diff --git a/arch/x86_64/boot/setup.S b/arch/x86_64/boot/setup.S
index 12ea0b6c52e..7de8b8fd168 100644
--- a/arch/x86_64/boot/setup.S
+++ b/arch/x86_64/boot/setup.S
@@ -45,7 +45,6 @@
* Added long mode checking and SSE force. March 2003, Andi Kleen.
*/
-#include <linux/config.h>
#include <asm/segment.h>
#include <linux/version.h>
#include <linux/compile.h>
diff --git a/arch/x86_64/boot/tools/build.c b/arch/x86_64/boot/tools/build.c
index c44f5e2ec10..eae86691709 100644
--- a/arch/x86_64/boot/tools/build.c
+++ b/arch/x86_64/boot/tools/build.c
@@ -149,10 +149,8 @@ int main(int argc, char ** argv)
sz = sb.st_size;
fprintf (stderr, "System is %d kB\n", sz/1024);
sys_size = (sz + 15) / 16;
- /* 0x40000*16 = 4.0 MB, reasonable estimate for the current maximum */
- if (sys_size > (is_big_kernel ? 0x40000 : DEF_SYSSIZE))
- die("System is too big. Try using %smodules.",
- is_big_kernel ? "" : "bzImage or ");
+ if (!is_big_kernel && sys_size > DEF_SYSSIZE)
+ die("System is too big. Try using bzImage or modules.");
while (sz > 0) {
int l, n;
diff --git a/arch/x86_64/boot/video.S b/arch/x86_64/boot/video.S
index 32327bb37af..2aa565c136e 100644
--- a/arch/x86_64/boot/video.S
+++ b/arch/x86_64/boot/video.S
@@ -1929,6 +1929,7 @@ skip10: movb %ah, %al
ret
store_edid:
+#ifdef CONFIG_FIRMWARE_EDID
pushw %es # just save all registers
pushw %ax
pushw %bx
@@ -1946,6 +1947,22 @@ store_edid:
rep
stosl
+ pushw %es # save ES
+ xorw %di, %di # Report Capability
+ pushw %di
+ popw %es # ES:DI must be 0:0
+ movw $0x4f15, %ax
+ xorw %bx, %bx
+ xorw %cx, %cx
+ int $0x10
+ popw %es # restore ES
+
+ cmpb $0x00, %ah # call successful
+ jne no_edid
+
+ cmpb $0x4f, %al # function supported
+ jne no_edid
+
movw $0x4f15, %ax # do VBE/DDC
movw $0x01, %bx
movw $0x00, %cx
@@ -1953,12 +1970,14 @@ store_edid:
movw $0x140, %di
int $0x10
+no_edid:
popw %di # restore all registers
popw %dx
popw %cx
popw %bx
popw %ax
popw %es
+#endif
ret
# VIDEO_SELECT-only variables
diff --git a/arch/x86_64/crypto/aes-x86_64-asm.S b/arch/x86_64/crypto/aes-x86_64-asm.S
index 483cbb23ab8..26b40de4d0b 100644
--- a/arch/x86_64/crypto/aes-x86_64-asm.S
+++ b/arch/x86_64/crypto/aes-x86_64-asm.S
@@ -15,6 +15,10 @@
.text
+#include <asm/asm-offsets.h>
+
+#define BASE crypto_tfm_ctx_offset
+
#define R1 %rax
#define R1E %eax
#define R1X %ax
@@ -46,19 +50,19 @@
#define R10 %r10
#define R11 %r11
-#define prologue(FUNC,BASE,B128,B192,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11) \
+#define prologue(FUNC,KEY,B128,B192,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11) \
.global FUNC; \
.type FUNC,@function; \
.align 8; \
FUNC: movq r1,r2; \
movq r3,r4; \
- leaq BASE+52(r8),r9; \
+ leaq BASE+KEY+52(r8),r9; \
movq r10,r11; \
movl (r7),r5 ## E; \
movl 4(r7),r1 ## E; \
movl 8(r7),r6 ## E; \
movl 12(r7),r7 ## E; \
- movl (r8),r10 ## E; \
+ movl BASE(r8),r10 ## E; \
xorl -48(r9),r5 ## E; \
xorl -44(r9),r1 ## E; \
xorl -40(r9),r6 ## E; \
@@ -128,8 +132,8 @@ FUNC: movq r1,r2; \
movl r3 ## E,r1 ## E; \
movl r4 ## E,r2 ## E;
-#define entry(FUNC,BASE,B128,B192) \
- prologue(FUNC,BASE,B128,B192,R2,R8,R7,R9,R1,R3,R4,R6,R10,R5,R11)
+#define entry(FUNC,KEY,B128,B192) \
+ prologue(FUNC,KEY,B128,B192,R2,R8,R7,R9,R1,R3,R4,R6,R10,R5,R11)
#define return epilogue(R8,R2,R9,R7,R5,R6,R3,R4,R11)
@@ -147,9 +151,9 @@ FUNC: movq r1,r2; \
#define decrypt_final(TAB,OFFSET) \
round(TAB,OFFSET,R2,R1,R4,R3,R6,R5,R7,R10,R5,R6,R3,R4)
-/* void aes_encrypt(void *ctx, u8 *out, const u8 *in) */
+/* void aes_enc_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in) */
- entry(aes_encrypt,0,enc128,enc192)
+ entry(aes_enc_blk,0,enc128,enc192)
encrypt_round(aes_ft_tab,-96)
encrypt_round(aes_ft_tab,-80)
enc192: encrypt_round(aes_ft_tab,-64)
@@ -166,9 +170,9 @@ enc128: encrypt_round(aes_ft_tab,-32)
encrypt_final(aes_fl_tab,112)
return
-/* void aes_decrypt(void *ctx, u8 *out, const u8 *in) */
+/* void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in) */
- entry(aes_decrypt,240,dec128,dec192)
+ entry(aes_dec_blk,240,dec128,dec192)
decrypt_round(aes_it_tab,-96)
decrypt_round(aes_it_tab,-80)
dec192: decrypt_round(aes_it_tab,-64)
diff --git a/arch/x86_64/crypto/aes.c b/arch/x86_64/crypto/aes.c
index 6f77e7700d3..68866fab37a 100644
--- a/arch/x86_64/crypto/aes.c
+++ b/arch/x86_64/crypto/aes.c
@@ -227,10 +227,10 @@ static void __init gen_tabs(void)
t ^= E_KEY[8 * i + 7]; E_KEY[8 * i + 15] = t; \
}
-static int aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len,
- u32 *flags)
+static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len, u32 *flags)
{
- struct aes_ctx *ctx = ctx_arg;
+ struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *key = (const __le32 *)in_key;
u32 i, j, t, u, v, w;
@@ -283,8 +283,18 @@ static int aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len,
return 0;
}
-extern void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in);
-extern void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in);
+asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in);
+asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in);
+
+static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+ aes_enc_blk(tfm, dst, src);
+}
+
+static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+ aes_dec_blk(tfm, dst, src);
+}
static struct crypto_alg aes_alg = {
.cra_name = "aes",
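
The assembly now receives the struct crypto_tfm itself rather than a raw context pointer, so it must step over the tfm header to reach the key schedule; that is exactly what the BASE = crypto_tfm_ctx_offset constant (emitted by asm-offsets.c further down) encodes. A hedged C restatement of that address arithmetic (ctx_by_offset is illustrative):

	#include <linux/crypto.h>

	/* Sketch: crypto_tfm_ctx(tfm) and "(u8 *)tfm + BASE" in the
	 * assembly name the same memory - the per-tfm context that
	 * follows the struct crypto_tfm header (__crt_ctx). */
	static void *ctx_by_offset(struct crypto_tfm *tfm)
	{
		return (u8 *)tfm + offsetof(struct crypto_tfm, __crt_ctx);
	}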
diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig
index 69db0c0721d..e69d403949c 100644
--- a/arch/x86_64/defconfig
+++ b/arch/x86_64/defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.17-rc1-git11
-# Sun Apr 16 07:22:36 2006
+# Linux kernel version: 2.6.17-git6
+# Sat Jun 24 00:52:28 2006
#
CONFIG_X86_64=y
CONFIG_64BIT=y
@@ -42,7 +42,6 @@ CONFIG_IKCONFIG_PROC=y
# CONFIG_RELAY is not set
CONFIG_INITRAMFS_SOURCE=""
CONFIG_UID16=y
-CONFIG_VM86=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
@@ -57,7 +56,6 @@ CONFIG_FUTEX=y
CONFIG_EPOLL=y
CONFIG_SHMEM=y
CONFIG_SLAB=y
-CONFIG_DOUBLEFAULT=y
# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
# CONFIG_SLOB is not set
@@ -144,7 +142,8 @@ CONFIG_NR_CPUS=32
CONFIG_HOTPLUG_CPU=y
CONFIG_HPET_TIMER=y
CONFIG_HPET_EMULATE_RTC=y
-CONFIG_GART_IOMMU=y
+CONFIG_IOMMU=y
+# CONFIG_CALGARY_IOMMU is not set
CONFIG_SWIOTLB=y
CONFIG_X86_MCE=y
CONFIG_X86_MCE_INTEL=y
@@ -158,6 +157,7 @@ CONFIG_HZ_250=y
# CONFIG_HZ_1000 is not set
CONFIG_HZ=250
# CONFIG_REORDER is not set
+CONFIG_K8_NB=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_ISA_DMA_API=y
@@ -293,6 +293,8 @@ CONFIG_IP_PNP_DHCP=y
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
CONFIG_INET_DIAG=y
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
@@ -305,7 +307,10 @@ CONFIG_IPV6=y
# CONFIG_INET6_IPCOMP is not set
# CONFIG_INET6_XFRM_TUNNEL is not set
# CONFIG_INET6_TUNNEL is not set
+# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_NETWORK_SECMARK is not set
# CONFIG_NETFILTER is not set
#
@@ -344,6 +349,7 @@ CONFIG_IPV6=y
# Network testing
#
# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_TCPPROBE is not set
# CONFIG_HAMRADIO is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
@@ -360,6 +366,7 @@ CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_SYS_HYPERVISOR is not set
#
# Connector - unified userspace <-> kernelspace linker
@@ -526,6 +533,7 @@ CONFIG_SCSI_ATA_PIIX=y
# CONFIG_SCSI_SATA_MV is not set
CONFIG_SCSI_SATA_NV=y
# CONFIG_SCSI_PDC_ADMA is not set
+# CONFIG_SCSI_HPTIOP is not set
# CONFIG_SCSI_SATA_QSTOR is not set
# CONFIG_SCSI_SATA_PROMISE is not set
# CONFIG_SCSI_SATA_SX4 is not set
@@ -591,10 +599,7 @@ CONFIG_IEEE1394=y
#
# Device Drivers
#
-
-#
-# Texas Instruments PCILynx requires I2C
-#
+# CONFIG_IEEE1394_PCILYNX is not set
CONFIG_IEEE1394_OHCI1394=y
#
@@ -645,7 +650,16 @@ CONFIG_VORTEX=y
#
# Tulip family network device support
#
-# CONFIG_NET_TULIP is not set
+CONFIG_NET_TULIP=y
+# CONFIG_DE2104X is not set
+CONFIG_TULIP=y
+# CONFIG_TULIP_MWI is not set
+# CONFIG_TULIP_MMIO is not set
+# CONFIG_TULIP_NAPI is not set
+# CONFIG_DE4X5 is not set
+# CONFIG_WINBOND_840 is not set
+# CONFIG_DM9102 is not set
+# CONFIG_ULI526X is not set
# CONFIG_HP100 is not set
CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set
@@ -697,6 +711,7 @@ CONFIG_TIGON3=y
# CONFIG_IXGB is not set
CONFIG_S2IO=m
# CONFIG_S2IO_NAPI is not set
+# CONFIG_MYRI10GE is not set
#
# Token Ring devices
@@ -887,7 +902,56 @@ CONFIG_HPET_MMAP=y
#
# I2C support
#
-# CONFIG_I2C is not set
+CONFIG_I2C=m
+CONFIG_I2C_CHARDEV=m
+
+#
+# I2C Algorithms
+#
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_I810 is not set
+# CONFIG_I2C_PIIX4 is not set
+CONFIG_I2C_ISA=m
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_PROSAVAGE is not set
+# CONFIG_I2C_SAVAGE4 is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+# CONFIG_I2C_VOODOO3 is not set
+# CONFIG_I2C_PCA_ISA is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_SENSORS_DS1337 is not set
+# CONFIG_SENSORS_DS1374 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
#
# SPI support
@@ -898,14 +962,51 @@ CONFIG_HPET_MMAP=y
#
# Dallas's 1-wire bus
#
-# CONFIG_W1 is not set
#
# Hardware Monitoring support
#
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ABITUGURU is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_FSCPOS is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+CONFIG_SENSORS_SMSC47B397=m
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VT8231 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
# CONFIG_SENSORS_HDAPS is not set
# CONFIG_HWMON_DEBUG_CHIP is not set
@@ -918,6 +1019,7 @@ CONFIG_HWMON=y
# Multimedia devices
#
# CONFIG_VIDEO_DEV is not set
+CONFIG_VIDEO_V4L2=y
#
# Digital Video Broadcasting Devices
@@ -953,28 +1055,17 @@ CONFIG_SOUND=y
# Open Sound System
#
CONFIG_SOUND_PRIME=y
-CONFIG_OBSOLETE_OSS_DRIVER=y
# CONFIG_SOUND_BT878 is not set
-# CONFIG_SOUND_CMPCI is not set
# CONFIG_SOUND_EMU10K1 is not set
# CONFIG_SOUND_FUSION is not set
-# CONFIG_SOUND_CS4281 is not set
-# CONFIG_SOUND_ES1370 is not set
# CONFIG_SOUND_ES1371 is not set
-# CONFIG_SOUND_ESSSOLO1 is not set
-# CONFIG_SOUND_MAESTRO is not set
-# CONFIG_SOUND_MAESTRO3 is not set
CONFIG_SOUND_ICH=y
-# CONFIG_SOUND_SONICVIBES is not set
# CONFIG_SOUND_TRIDENT is not set
# CONFIG_SOUND_MSNDCLAS is not set
# CONFIG_SOUND_MSNDPIN is not set
# CONFIG_SOUND_VIA82CXXX is not set
# CONFIG_SOUND_OSS is not set
-# CONFIG_SOUND_ALI5455 is not set
-# CONFIG_SOUND_FORTE is not set
-# CONFIG_SOUND_RME96XX is not set
-# CONFIG_SOUND_AD1980 is not set
+# CONFIG_SOUND_TVMIXER is not set
#
# USB support
@@ -1000,6 +1091,7 @@ CONFIG_USB_DEVICEFS=y
CONFIG_USB_EHCI_HCD=y
# CONFIG_USB_EHCI_SPLIT_ISO is not set
# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
# CONFIG_USB_ISP116X_HCD is not set
CONFIG_USB_OHCI_HCD=y
# CONFIG_USB_OHCI_BIG_ENDIAN is not set
@@ -1089,10 +1181,12 @@ CONFIG_USB_MON=y
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
# CONFIG_USB_LED is not set
+# CONFIG_USB_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
# CONFIG_USB_PHIDGETKIT is not set
# CONFIG_USB_PHIDGETSERVO is not set
# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_APPLEDISPLAY is not set
# CONFIG_USB_SISUSBVGA is not set
# CONFIG_USB_LD is not set
# CONFIG_USB_TEST is not set
@@ -1141,6 +1235,19 @@ CONFIG_USB_MON=y
# CONFIG_RTC_CLASS is not set
#
+# DMA Engine support
+#
+# CONFIG_DMA_ENGINE is not set
+
+#
+# DMA Clients
+#
+
+#
+# DMA Devices
+#
+
+#
# Firmware Drivers
#
# CONFIG_EDD is not set
@@ -1175,6 +1282,7 @@ CONFIG_FS_POSIX_ACL=y
# CONFIG_MINIX_FS is not set
# CONFIG_ROMFS_FS is not set
CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
# CONFIG_QUOTA is not set
CONFIG_DNOTIFY=y
CONFIG_AUTOFS_FS=y
@@ -1331,7 +1439,8 @@ CONFIG_DETECT_SOFTLOCKUP=y
CONFIG_DEBUG_FS=y
# CONFIG_DEBUG_VM is not set
# CONFIG_FRAME_POINTER is not set
-# CONFIG_UNWIND_INFO is not set
+CONFIG_UNWIND_INFO=y
+CONFIG_STACK_UNWIND=y
# CONFIG_FORCED_INLINING is not set
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_DEBUG_RODATA is not set
diff --git a/arch/x86_64/ia32/Makefile b/arch/x86_64/ia32/Makefile
index e9263b4975e..62bc5f56da9 100644
--- a/arch/x86_64/ia32/Makefile
+++ b/arch/x86_64/ia32/Makefile
@@ -11,6 +11,9 @@ obj-$(CONFIG_IA32_EMULATION) += $(sysv-y)
obj-$(CONFIG_IA32_AOUT) += ia32_aout.o
+audit-class-$(CONFIG_AUDIT) := audit.o
+obj-$(CONFIG_IA32_EMULATION) += $(audit-class-y)
+
$(obj)/syscall32_syscall.o: \
$(foreach F,sysenter syscall,$(obj)/vsyscall-$F.so)
diff --git a/arch/x86_64/ia32/audit.c b/arch/x86_64/ia32/audit.c
new file mode 100644
index 00000000000..ab94f2e58cd
--- /dev/null
+++ b/arch/x86_64/ia32/audit.c
@@ -0,0 +1,11 @@
+#include <asm-i386/unistd.h>
+
+unsigned ia32_dir_class[] = {
+#include <asm-generic/audit_dir_write.h>
+~0U
+};
+
+unsigned ia32_chattr_class[] = {
+#include <asm-generic/audit_change_attr.h>
+~0U
+};
diff --git a/arch/x86_64/ia32/fpu32.c b/arch/x86_64/ia32/fpu32.c
index 1c23095f181..2c8209a3605 100644
--- a/arch/x86_64/ia32/fpu32.c
+++ b/arch/x86_64/ia32/fpu32.c
@@ -2,7 +2,6 @@
* Copyright 2002 Andi Kleen, SuSE Labs.
* FXSAVE<->i387 conversion support. Based on code by Gareth Hughes.
* This is used for ptrace, signals and coredumps in 32bit emulation.
- * $Id: fpu32.c,v 1.1 2002/03/21 14:16:32 ak Exp $
*/
#include <linux/sched.h>
diff --git a/arch/x86_64/ia32/ia32_signal.c b/arch/x86_64/ia32/ia32_signal.c
index e0a92439f63..25e5ca22204 100644
--- a/arch/x86_64/ia32/ia32_signal.c
+++ b/arch/x86_64/ia32/ia32_signal.c
@@ -6,8 +6,6 @@
* 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
* 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes
* 2000-12-* x86-64 compatibility mode signal handling by Andi Kleen
- *
- * $Id: ia32_signal.c,v 1.22 2002/07/29 10:34:03 ak Exp $
*/
#include <linux/sched.h>
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index 5a92fed2d1d..c536fa98ea3 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -155,6 +155,7 @@ sysenter_tracesys:
.previous
jmp sysenter_do_call
CFI_ENDPROC
+ENDPROC(ia32_sysenter_target)
/*
* 32bit SYSCALL instruction entry.
@@ -178,7 +179,7 @@ sysenter_tracesys:
*/
ENTRY(ia32_cstar_target)
CFI_STARTPROC32 simple
- CFI_DEF_CFA rsp,0
+ CFI_DEF_CFA rsp,PDA_STACKOFFSET
CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/
swapgs
@@ -249,6 +250,7 @@ cstar_tracesys:
.quad 1b,ia32_badarg
.previous
jmp cstar_do_call
+END(ia32_cstar_target)
ia32_badarg:
movq $-EFAULT,%rax
@@ -314,16 +316,13 @@ ia32_tracesys:
LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
RESTORE_REST
jmp ia32_do_syscall
+END(ia32_syscall)
ia32_badsys:
movq $0,ORIG_RAX-ARGOFFSET(%rsp)
movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
jmp int_ret_from_sys_call
-ni_syscall:
- movq %rax,%rdi
- jmp sys32_ni_syscall
-
quiet_ni_syscall:
movq $-ENOSYS,%rax
ret
@@ -370,10 +369,10 @@ ENTRY(ia32_ptregs_common)
RESTORE_REST
jmp ia32_sysret /* misbalances the return cache */
CFI_ENDPROC
+END(ia32_ptregs_common)
.section .rodata,"a"
.align 8
- .globl ia32_sys_call_table
ia32_sys_call_table:
.quad sys_restart_syscall
.quad sys_exit
@@ -696,4 +695,5 @@ ia32_sys_call_table:
.quad sys_sync_file_range
.quad sys_tee
.quad compat_sys_vmsplice
+ .quad compat_sys_move_pages
ia32_syscall_end:
diff --git a/arch/x86_64/ia32/ptrace32.c b/arch/x86_64/ia32/ptrace32.c
index 23a4515a73b..a590b7a0d92 100644
--- a/arch/x86_64/ia32/ptrace32.c
+++ b/arch/x86_64/ia32/ptrace32.c
@@ -7,8 +7,6 @@
*
* This allows access to 64bit processes too; but there is no way to see the extended
* register contents.
- *
- * $Id: ptrace32.c,v 1.16 2003/03/14 16:06:35 ak Exp $
*/
#include <linux/kernel.h>
@@ -27,6 +25,7 @@
#include <asm/debugreg.h>
#include <asm/i387.h>
#include <asm/fpu32.h>
+#include <asm/ia32.h>
/*
* Determines which flags the user has access to [1 = access, 0 = no access].
@@ -199,6 +198,24 @@ static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
#undef R32
+static long ptrace32_siginfo(unsigned request, u32 pid, u32 addr, u32 data)
+{
+ int ret;
+ compat_siginfo_t *si32 = (compat_siginfo_t *)compat_ptr(data);
+ siginfo_t *si = compat_alloc_user_space(sizeof(siginfo_t));
+ if (request == PTRACE_SETSIGINFO) {
+ ret = copy_siginfo_from_user32(si, si32);
+ if (ret)
+ return ret;
+ }
+ ret = sys_ptrace(request, pid, addr, (unsigned long)si);
+ if (ret)
+ return ret;
+ if (request == PTRACE_GETSIGINFO)
+ ret = copy_siginfo_to_user32(si32, si);
+ return ret;
+}
+
asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
{
struct task_struct *child;
@@ -208,9 +225,19 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
__u32 val;
switch (request) {
- default:
+ case PTRACE_TRACEME:
+ case PTRACE_ATTACH:
+ case PTRACE_KILL:
+ case PTRACE_CONT:
+ case PTRACE_SINGLESTEP:
+ case PTRACE_DETACH:
+ case PTRACE_SYSCALL:
+ case PTRACE_SETOPTIONS:
return sys_ptrace(request, pid, addr, data);
+ default:
+ return -EINVAL;
+
case PTRACE_PEEKTEXT:
case PTRACE_PEEKDATA:
case PTRACE_POKEDATA:
@@ -225,10 +252,11 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
case PTRACE_GETFPXREGS:
case PTRACE_GETEVENTMSG:
break;
- }
- if (request == PTRACE_TRACEME)
- return ptrace_traceme();
+ case PTRACE_SETSIGINFO:
+ case PTRACE_GETSIGINFO:
+ return ptrace32_siginfo(request, pid, addr, data);
+ }
child = ptrace_get_task_struct(pid);
if (IS_ERR(child))
@@ -349,8 +377,7 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
break;
default:
- ret = -EINVAL;
- break;
+ BUG();
}
out:
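
ptrace32_siginfo() above uses a common compat pattern: stage a native siginfo_t in user-accessible scratch space via compat_alloc_user_space() (carved out just below the 64-bit user stack pointer), convert between layouts, and let the unmodified sys_ptrace() do the real work. A hedged sketch of the same staging shape for an arbitrary native call (native_call stands in for sys_ptrace; 'get' selects the copy-back direction):

	#include <linux/compat.h>

	/* Sketch of the compat staging pattern: 32-bit layout in, native
	 * layout staged in user space, native syscall, 32-bit layout out. */
	static long compat_siginfo_stage(long (*native_call)(siginfo_t __user *),
					 compat_siginfo_t __user *si32, int get)
	{
		siginfo_t __user *si = compat_alloc_user_space(sizeof(*si));
		long ret;

		if (!get && copy_siginfo_from_user32(si, si32))
			return -EFAULT;
		ret = native_call(si);
		if (!ret && get)
			ret = copy_siginfo_to_user32(si32, si);
		return ret;
	}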
diff --git a/arch/x86_64/ia32/sys_ia32.c b/arch/x86_64/ia32/sys_ia32.c
index f182b20858e..9c130993380 100644
--- a/arch/x86_64/ia32/sys_ia32.c
+++ b/arch/x86_64/ia32/sys_ia32.c
@@ -20,7 +20,6 @@
* This should be fixed.
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
@@ -508,19 +507,6 @@ sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr, int options)
return compat_sys_wait4(pid, stat_addr, options, NULL);
}
-int sys32_ni_syscall(int call)
-{
- struct task_struct *me = current;
- static char lastcomm[sizeof(me->comm)];
-
- if (strncmp(lastcomm, me->comm, sizeof(lastcomm))) {
- printk(KERN_INFO "IA32 syscall %d from %s not implemented\n",
- call, me->comm);
- strncpy(lastcomm, me->comm, sizeof(lastcomm));
- }
- return -ENOSYS;
-}
-
/* 32-bit timeval and related flotsam. */
asmlinkage long
@@ -916,7 +902,7 @@ long sys32_vm86_warning(void)
struct task_struct *me = current;
static char lastcomm[sizeof(me->comm)];
if (strncmp(lastcomm, me->comm, sizeof(lastcomm))) {
- printk(KERN_INFO "%s: vm86 mode not supported on 64 bit kernel\n",
+ compat_printk(KERN_INFO "%s: vm86 mode not supported on 64 bit kernel\n",
me->comm);
strncpy(lastcomm, me->comm, sizeof(lastcomm));
}
@@ -929,13 +915,3 @@ long sys32_lookup_dcookie(u32 addr_low, u32 addr_high,
return sys_lookup_dcookie(((u64)addr_high << 32) | addr_low, buf, len);
}
-static int __init ia32_init (void)
-{
- printk("IA32 emulation $Id: sys_ia32.c,v 1.32 2002/03/24 13:02:28 ak Exp $\n");
- return 0;
-}
-
-__initcall(ia32_init);
-
-extern unsigned long ia32_sys_call_table[];
-EXPORT_SYMBOL(ia32_sys_call_table);
diff --git a/arch/x86_64/kernel/Makefile b/arch/x86_64/kernel/Makefile
index 059c88313f4..819e84ec5b6 100644
--- a/arch/x86_64/kernel/Makefile
+++ b/arch/x86_64/kernel/Makefile
@@ -8,7 +8,7 @@ obj-y := process.o signal.o entry.o traps.o irq.o \
ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_x86_64.o \
x8664_ksyms.o i387.o syscall.o vsyscall.o \
setup64.o bootflag.o e820.o reboot.o quirks.o i8237.o \
- pci-dma.o pci-nommu.o
+ pci-dma.o pci-nommu.o alternative.o
obj-$(CONFIG_X86_MCE) += mce.o
obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o
@@ -28,11 +28,14 @@ obj-$(CONFIG_PM) += suspend.o
obj-$(CONFIG_SOFTWARE_SUSPEND) += suspend_asm.o
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
-obj-$(CONFIG_GART_IOMMU) += pci-gart.o aperture.o
+obj-$(CONFIG_IOMMU) += pci-gart.o aperture.o
+obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary.o tce.o
obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_X86_PM_TIMER) += pmtimer.o
obj-$(CONFIG_X86_VSMP) += vsmp.o
+obj-$(CONFIG_K8_NB) += k8.o
+obj-$(CONFIG_AUDIT) += audit.o
obj-$(CONFIG_MODULES) += module.o
@@ -49,3 +52,5 @@ intel_cacheinfo-y += ../../i386/kernel/cpu/intel_cacheinfo.o
quirks-y += ../../i386/kernel/quirks.o
i8237-y += ../../i386/kernel/i8237.o
msr-$(subst m,y,$(CONFIG_X86_MSR)) += ../../i386/kernel/msr.o
+alternative-y += ../../i386/kernel/alternative.o
+
diff --git a/arch/x86_64/kernel/acpi/Makefile b/arch/x86_64/kernel/acpi/Makefile
index 4fe97071f29..080b9963f1b 100644
--- a/arch/x86_64/kernel/acpi/Makefile
+++ b/arch/x86_64/kernel/acpi/Makefile
@@ -4,5 +4,6 @@ obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o
ifneq ($(CONFIG_ACPI_PROCESSOR),)
obj-y += processor.o
+processor-y := ../../../i386/kernel/acpi/processor.o ../../../i386/kernel/acpi/cstate.o
endif
diff --git a/arch/x86_64/kernel/acpi/processor.c b/arch/x86_64/kernel/acpi/processor.c
deleted file mode 100644
index 3bdc2baa5bb..00000000000
--- a/arch/x86_64/kernel/acpi/processor.c
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * arch/x86_64/kernel/acpi/processor.c
- *
- * Copyright (C) 2005 Intel Corporation
- * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
- * - Added _PDC for platforms with Intel CPUs
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/acpi.h>
-
-#include <acpi/processor.h>
-#include <asm/acpi.h>
-
-static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
-{
- struct acpi_object_list *obj_list;
- union acpi_object *obj;
- u32 *buf;
-
- /* allocate and initialize pdc. It will be used later. */
- obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
- if (!obj_list) {
- printk(KERN_ERR "Memory allocation error\n");
- return;
- }
-
- obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
- if (!obj) {
- printk(KERN_ERR "Memory allocation error\n");
- kfree(obj_list);
- return;
- }
-
- buf = kmalloc(12, GFP_KERNEL);
- if (!buf) {
- printk(KERN_ERR "Memory allocation error\n");
- kfree(obj);
- kfree(obj_list);
- return;
- }
-
- buf[0] = ACPI_PDC_REVISION_ID;
- buf[1] = 1;
- buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
-
- obj->type = ACPI_TYPE_BUFFER;
- obj->buffer.length = 12;
- obj->buffer.pointer = (u8 *) buf;
- obj_list->count = 1;
- obj_list->pointer = obj;
- pr->pdc = obj_list;
-
- return;
-}
-
-/* Initialize _PDC data based on the CPU vendor */
-void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
-{
- unsigned int cpu = pr->id;
- struct cpuinfo_x86 *c = cpu_data + cpu;
-
- pr->pdc = NULL;
- if (c->x86_vendor == X86_VENDOR_INTEL && cpu_has(c, X86_FEATURE_EST))
- init_intel_pdc(pr, c);
-
- return;
-}
-
-EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
diff --git a/arch/x86_64/kernel/acpi/sleep.c b/arch/x86_64/kernel/acpi/sleep.c
index 867a0ebee17..5ebf62c7a3d 100644
--- a/arch/x86_64/kernel/acpi/sleep.c
+++ b/arch/x86_64/kernel/acpi/sleep.c
@@ -26,7 +26,6 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -35,6 +34,8 @@
#include <linux/pci.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
+#include <linux/cpumask.h>
+
#include <asm/mpspec.h>
#include <asm/io.h>
#include <asm/apic.h>
@@ -66,7 +67,8 @@ static void init_low_mapping(void)
pgd_t *slot0 = pgd_offset(current->mm, 0UL);
low_ptr = *slot0;
set_pgd(slot0, *pgd_offset(current->mm, PAGE_OFFSET));
- flush_tlb_all();
+ WARN_ON(num_online_cpus() != 1);
+ local_flush_tlb();
}
/**
@@ -92,7 +94,7 @@ int acpi_save_state_mem(void)
void acpi_restore_state_mem(void)
{
set_pgd(pgd_offset(current->mm, 0UL), low_ptr);
- flush_tlb_all();
+ local_flush_tlb();
}
/**
diff --git a/arch/x86_64/kernel/aperture.c b/arch/x86_64/kernel/aperture.c
index 70b9d21ed67..58af8e73738 100644
--- a/arch/x86_64/kernel/aperture.c
+++ b/arch/x86_64/kernel/aperture.c
@@ -8,9 +8,7 @@
* because only the bootmem allocator can allocate 32+MB.
*
* Copyright 2002 Andi Kleen, SuSE Labs.
- * $Id: aperture.c,v 1.7 2003/08/01 03:36:18 ak Exp $
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -24,6 +22,7 @@
#include <asm/proto.h>
#include <asm/pci-direct.h>
#include <asm/dma.h>
+#include <asm/k8.h>
int iommu_aperture;
int iommu_aperture_disabled __initdata = 0;
@@ -37,8 +36,6 @@ int fix_aperture __initdata = 1;
/* This code runs before the PCI subsystem is initialized, so just
access the northbridge directly. */
-#define NB_ID_3 (PCI_VENDOR_ID_AMD | (0x1103<<16))
-
static u32 __init allocate_aperture(void)
{
pg_data_t *nd0 = NODE_DATA(0);
@@ -68,20 +65,20 @@ static u32 __init allocate_aperture(void)
return (u32)__pa(p);
}
-static int __init aperture_valid(char *name, u64 aper_base, u32 aper_size)
+static int __init aperture_valid(u64 aper_base, u32 aper_size)
{
if (!aper_base)
return 0;
if (aper_size < 64*1024*1024) {
- printk("Aperture from %s too small (%d MB)\n", name, aper_size>>20);
+ printk("Aperture too small (%d MB)\n", aper_size>>20);
return 0;
}
if (aper_base + aper_size >= 0xffffffff) {
- printk("Aperture from %s beyond 4GB. Ignoring.\n",name);
+ printk("Aperture beyond 4GB. Ignoring.\n");
return 0;
}
if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
- printk("Aperture from %s pointing to e820 RAM. Ignoring.\n",name);
+ printk("Aperture pointing to e820 RAM. Ignoring.\n");
return 0;
}
return 1;
@@ -140,7 +137,7 @@ static __u32 __init read_agp(int num, int slot, int func, int cap, u32 *order)
printk("Aperture from AGP @ %Lx size %u MB (APSIZE %x)\n",
aper, 32 << *order, apsizereg);
- if (!aperture_valid("AGP bridge", aper, (32*1024*1024) << *order))
+ if (!aperture_valid(aper, (32*1024*1024) << *order))
return 0;
return (u32)aper;
}
@@ -208,10 +205,10 @@ void __init iommu_hole_init(void)
fix = 0;
for (num = 24; num < 32; num++) {
- char name[30];
- if (read_pci_config(0, num, 3, 0x00) != NB_ID_3)
- continue;
+ if (!early_is_k8_nb(read_pci_config(0, num, 3, 0x00)))
+ continue;
+ iommu_detected = 1;
iommu_aperture = 1;
aper_order = (read_pci_config(0, num, 3, 0x90) >> 1) & 7;
@@ -222,9 +219,7 @@ void __init iommu_hole_init(void)
printk("CPU %d: aperture @ %Lx size %u MB\n", num-24,
aper_base, aper_size>>20);
- sprintf(name, "northbridge cpu %d", num-24);
-
- if (!aperture_valid(name, aper_base, aper_size)) {
+ if (!aperture_valid(aper_base, aper_size)) {
fix = 1;
break;
}
@@ -273,7 +268,7 @@ void __init iommu_hole_init(void)
/* Fix up the north bridges */
for (num = 24; num < 32; num++) {
- if (read_pci_config(0, num, 3, 0x00) != NB_ID_3)
+ if (!early_is_k8_nb(read_pci_config(0, num, 3, 0x00)))
continue;
/* Don't enable translation yet. That is done later.
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index 100a30c4004..2b8cef037a6 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -14,7 +14,6 @@
* Mikael Pettersson : PM converted to driver model.
*/
-#include <linux/config.h>
#include <linux/init.h>
#include <linux/mm.h>
@@ -51,7 +50,7 @@ int disable_apic_timer __initdata;
static cpumask_t timer_interrupt_broadcast_ipi_mask;
/* Using APIC to generate smp_local_timer_interrupt? */
-int using_apic_timer = 0;
+int using_apic_timer __read_mostly = 0;
static void apic_pm_activate(void);
@@ -100,7 +99,7 @@ void clear_local_APIC(void)
maxlvt = get_maxlvt();
/*
- * Masking an LVT entry on a P6 can trigger a local APIC error
+ * Masking an LVT entry can trigger a local APIC error
* if the vector is zero. Mask LVTERR first to prevent this.
*/
if (maxlvt >= 3) {
@@ -851,7 +850,18 @@ void disable_APIC_timer(void)
unsigned long v;
v = apic_read(APIC_LVTT);
- apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
+ /*
+ * When an illegal vector value (0-15) is written to an LVT
+ * entry and delivery mode is Fixed, the APIC may signal an
+ * illegal vector error, without regard to whether the mask
+ * bit is set or whether an interrupt is actually seen on input.
+ *
+ * The boot sequence might call this function while the LVTT
+ * still holds a '0' vector value, so make sure the vector
+ * field is set to a valid value.
+ */
+ v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
+ apic_write(APIC_LVTT, v);
}
}
@@ -909,15 +919,13 @@ int setup_profiling_timer(unsigned int multiplier)
return -EINVAL;
}
-#ifdef CONFIG_X86_MCE_AMD
-void setup_threshold_lvt(unsigned long lvt_off)
+void setup_APIC_extened_lvt(unsigned char lvt_off, unsigned char vector,
+ unsigned char msg_type, unsigned char mask)
{
- unsigned int v = 0;
- unsigned long reg = (lvt_off << 4) + 0x500;
- v |= THRESHOLD_APIC_VECTOR;
+ unsigned long reg = (lvt_off << 4) + K8_APIC_EXT_LVT_BASE;
+ unsigned int v = (mask << 16) | (msg_type << 8) | vector;
apic_write(reg, v);
}
-#endif /* CONFIG_X86_MCE_AMD */
#undef APIC_DIVISOR
@@ -983,7 +991,7 @@ void smp_apic_timer_interrupt(struct pt_regs *regs)
}
/*
- * oem_force_hpet_timer -- force HPET mode for some boxes.
+ * apic_is_clustered_box() -- Check if we can expect good TSC
*
* Thus far, the major user of this is IBM's Summit2 series:
*
@@ -991,7 +999,7 @@ void smp_apic_timer_interrupt(struct pt_regs *regs)
* multi-chassis. Use available data to take a good guess.
* If in doubt, go HPET.
*/
-__cpuinit int oem_force_hpet_timer(void)
+__cpuinit int apic_is_clustered_box(void)
{
int i, clusters, zeros;
unsigned id;
@@ -1022,8 +1030,7 @@ __cpuinit int oem_force_hpet_timer(void)
}
/*
- * If clusters > 2, then should be multi-chassis. Return 1 for HPET.
- * Else return 0 to use TSC.
+ * If clusters > 2, then should be multi-chassis.
* May have to revisit this when multi-core + hyperthreaded CPUs come
* out, but AFAIK this will work even for them.
*/
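
setup_APIC_extened_lvt() (the misspelling is in the kernel source itself) generalizes the old AMD-only setup_threshold_lvt(): a K8 extended LVT register at K8_APIC_EXT_LVT_BASE + (lvt_off << 4) packs the mask bit (bit 16), message type (bits 15:8) and vector (bits 7:0). Under that reading, the old behavior is recovered by a call of roughly this shape (a hedged sketch; msg_type 0 for fixed delivery is inferred from the old code, which wrote only the vector):

	/* Sketch: what setup_threshold_lvt(lvt_off) reduces to with the new
	 * generic helper - fixed delivery, unmasked, threshold vector. */
	static void demo_setup_threshold(unsigned char lvt_off)
	{
		setup_APIC_extened_lvt(lvt_off, THRESHOLD_APIC_VECTOR,
				       0 /* msg_type: fixed */, 0 /* unmasked */);
	}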
diff --git a/arch/x86_64/kernel/asm-offsets.c b/arch/x86_64/kernel/asm-offsets.c
index 38834bbbae1..96687e2beb2 100644
--- a/arch/x86_64/kernel/asm-offsets.c
+++ b/arch/x86_64/kernel/asm-offsets.c
@@ -4,6 +4,7 @@
* and format the required data.
*/
+#include <linux/crypto.h>
#include <linux/sched.h>
#include <linux/stddef.h>
#include <linux/errno.h>
@@ -68,5 +69,7 @@ int main(void)
DEFINE(pbe_next, offsetof(struct pbe, next));
BLANK();
DEFINE(TSS_ist, offsetof(struct tss_struct, ist));
+ BLANK();
+ DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
return 0;
}
diff --git a/arch/x86_64/kernel/audit.c b/arch/x86_64/kernel/audit.c
new file mode 100644
index 00000000000..a067aa468a8
--- /dev/null
+++ b/arch/x86_64/kernel/audit.c
@@ -0,0 +1,29 @@
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/audit.h>
+#include <asm/unistd.h>
+
+static unsigned dir_class[] = {
+#include <asm-generic/audit_dir_write.h>
+~0U
+};
+
+static unsigned chattr_class[] = {
+#include <asm-generic/audit_change_attr.h>
+~0U
+};
+
+static int __init audit_classes_init(void)
+{
+#ifdef CONFIG_IA32_EMULATION
+ extern __u32 ia32_dir_class[];
+ extern __u32 ia32_chattr_class[];
+ audit_register_class(AUDIT_CLASS_DIR_WRITE_32, ia32_dir_class);
+ audit_register_class(AUDIT_CLASS_CHATTR_32, ia32_chattr_class);
+#endif
+ audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
+ audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
+ return 0;
+}
+
+__initcall(audit_classes_init);
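
Both class tables are built by including the asm-generic syscall-number lists and terminating with a ~0U sentinel, so a consumer only needs to walk to the sentinel. A hedged sketch of what a consumer such as audit_register_class() plausibly does with them (demo_build_mask and the bitmap are illustrative, not the audit core's actual code):

	/* Sketch: turn a ~0U-terminated syscall-number table such as
	 * dir_class[] into a per-syscall bitmask. */
	static void demo_build_mask(const unsigned *class, unsigned long *mask)
	{
		int i;

		for (i = 0; class[i] != ~0U; i++)
			__set_bit(class[i], mask);
	}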
diff --git a/arch/x86_64/kernel/crash.c b/arch/x86_64/kernel/crash.c
index 4e6c3b729e3..d8d5750d610 100644
--- a/arch/x86_64/kernel/crash.c
+++ b/arch/x86_64/kernel/crash.c
@@ -111,14 +111,14 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu)
atomic_dec(&waiting_for_crash_ipi);
/* Assume hlt works */
for(;;)
- asm("hlt");
+ halt();
return 1;
}
static void smp_send_nmi_allbutself(void)
{
- send_IPI_allbutself(APIC_DM_NMI);
+ send_IPI_allbutself(NMI_VECTOR);
}
/*
@@ -161,7 +161,7 @@ void machine_crash_shutdown(struct pt_regs *regs)
{
/*
* This function is only called after the system
- * has paniced or is otherwise in a critical state.
+ * has panicked or is otherwise in a critical state.
* The minimum amount of code to allow a kexec'd kernel
* to run successfully needs to happen here.
*
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index 1ef6028f721..b8eee4c7888 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -1,7 +1,6 @@
/*
* Handle the memory map.
* The functions here do the job until bootmem takes over.
- * $Id: e820.c,v 1.4 2002/09/19 19:25:32 ak Exp $
*
* Getting sanitize_e820_map() in sync with i386 version by applying change:
* - Provisions for empty E820 memory regions (reported by certain BIOSes).
@@ -9,7 +8,6 @@
* Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
*
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -621,6 +619,7 @@ void __init parse_memmapopt(char *p, char **from)
}
unsigned long pci_mem_start = 0xaeedbabe;
+EXPORT_SYMBOL(pci_mem_start);
/*
* Search for the biggest gap in the low 32 bits of the e820
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index 586b34c00c4..ed92c298360 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -28,7 +28,6 @@
*/
#define ASSEMBLY 1
-#include <linux/config.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/smp.h>
@@ -154,6 +153,7 @@ rff_trace:
GET_THREAD_INFO(%rcx)
jmp rff_action
CFI_ENDPROC
+END(ret_from_fork)
/*
* System call entry. Upto 6 arguments in registers are supported.
@@ -188,7 +188,7 @@ rff_trace:
ENTRY(system_call)
CFI_STARTPROC simple
- CFI_DEF_CFA rsp,0
+ CFI_DEF_CFA rsp,PDA_STACKOFFSET
CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/
swapgs
@@ -285,6 +285,7 @@ tracesys:
/* Use IRET because user could have changed frame */
jmp int_ret_from_sys_call
CFI_ENDPROC
+END(system_call)
/*
* Syscall return path ending with IRET.
@@ -364,6 +365,7 @@ int_restore_rest:
cli
jmp int_with_check
CFI_ENDPROC
+END(int_ret_from_sys_call)
/*
* Certain special system calls that need to save a complete full stack frame.
@@ -375,6 +377,7 @@ int_restore_rest:
leaq \func(%rip),%rax
leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
jmp ptregscall_common
+END(\label)
.endm
CFI_STARTPROC
@@ -404,6 +407,7 @@ ENTRY(ptregscall_common)
CFI_REL_OFFSET rip, 0
ret
CFI_ENDPROC
+END(ptregscall_common)
ENTRY(stub_execve)
CFI_STARTPROC
@@ -418,6 +422,7 @@ ENTRY(stub_execve)
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
+END(stub_execve)
/*
* sigreturn is special because it needs to restore all registers on return.
@@ -435,6 +440,7 @@ ENTRY(stub_rt_sigreturn)
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
+END(stub_rt_sigreturn)
/*
* initial frame state for interrupts and exceptions
@@ -466,29 +472,18 @@ ENTRY(stub_rt_sigreturn)
/* 0(%rsp): interrupt number */
.macro interrupt func
cld
-#ifdef CONFIG_DEBUG_INFO
- SAVE_ALL
- movq %rsp,%rdi
- /*
- * Setup a stack frame pointer. This allows gdb to trace
- * back to the original stack.
- */
- movq %rsp,%rbp
- CFI_DEF_CFA_REGISTER rbp
-#else
SAVE_ARGS
leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler
-#endif
+ pushq %rbp
+ CFI_ADJUST_CFA_OFFSET 8
+ CFI_REL_OFFSET rbp, 0
+ movq %rsp,%rbp
+ CFI_DEF_CFA_REGISTER rbp
testl $3,CS(%rdi)
je 1f
swapgs
1: incl %gs:pda_irqcount # RED-PEN should check preempt count
- movq %gs:pda_irqstackptr,%rax
- cmoveq %rax,%rsp /*todo This needs CFI annotation! */
- pushq %rdi # save old stack
-#ifndef CONFIG_DEBUG_INFO
- CFI_ADJUST_CFA_OFFSET 8
-#endif
+ cmoveq %gs:pda_irqstackptr,%rsp
call \func
.endm
@@ -497,17 +492,11 @@ ENTRY(common_interrupt)
interrupt do_IRQ
/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
- popq %rdi
-#ifndef CONFIG_DEBUG_INFO
- CFI_ADJUST_CFA_OFFSET -8
-#endif
cli
decl %gs:pda_irqcount
-#ifdef CONFIG_DEBUG_INFO
- movq RBP(%rdi),%rbp
+ leaveq
CFI_DEF_CFA_REGISTER rsp
-#endif
- leaq ARGOFFSET(%rdi),%rsp /*todo This needs CFI annotation! */
+ CFI_ADJUST_CFA_OFFSET -8
exit_intr:
GET_THREAD_INFO(%rcx)
testl $3,CS-ARGOFFSET(%rsp)
@@ -589,14 +578,16 @@ retint_kernel:
call preempt_schedule_irq
jmp exit_intr
#endif
+
CFI_ENDPROC
+END(common_interrupt)
/*
* APIC interrupts.
*/
.macro apicinterrupt num,func
INTR_FRAME
- pushq $\num-256
+ pushq $~(\num)
CFI_ADJUST_CFA_OFFSET 8
interrupt \func
jmp ret_from_intr
@@ -605,17 +596,21 @@ retint_kernel:
ENTRY(thermal_interrupt)
apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
+END(thermal_interrupt)
ENTRY(threshold_interrupt)
apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
+END(threshold_interrupt)
#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
+END(reschedule_interrupt)
.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
+END(invalidate_interrupt\num)
.endm
INVALIDATE_ENTRY 0
@@ -629,17 +624,21 @@ ENTRY(invalidate_interrupt\num)
ENTRY(call_function_interrupt)
apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
+END(call_function_interrupt)
#endif
#ifdef CONFIG_X86_LOCAL_APIC
ENTRY(apic_timer_interrupt)
apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
+END(apic_timer_interrupt)
ENTRY(error_interrupt)
apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
+END(error_interrupt)
ENTRY(spurious_interrupt)
apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
+END(spurious_interrupt)
#endif
/*
@@ -777,6 +776,7 @@ error_kernelspace:
cmpq $gs_change,RIP(%rsp)
je error_swapgs
jmp error_sti
+END(error_entry)
/* Reload gs selector with exception handling */
/* edi: new selector */
@@ -794,6 +794,7 @@ gs_change:
CFI_ADJUST_CFA_OFFSET -8
ret
CFI_ENDPROC
+ENDPROC(load_gs_index)
.section __ex_table,"a"
.align 8
@@ -847,7 +848,7 @@ ENTRY(kernel_thread)
UNFAKE_STACK_FRAME
ret
CFI_ENDPROC
-
+ENDPROC(kernel_thread)
child_rip:
/*
@@ -860,6 +861,7 @@ child_rip:
# exit
xorl %edi, %edi
call do_exit
+ENDPROC(child_rip)
/*
* execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
@@ -889,19 +891,24 @@ ENTRY(execve)
UNFAKE_STACK_FRAME
ret
CFI_ENDPROC
+ENDPROC(execve)
KPROBE_ENTRY(page_fault)
errorentry do_page_fault
+END(page_fault)
.previous .text
ENTRY(coprocessor_error)
zeroentry do_coprocessor_error
+END(coprocessor_error)
ENTRY(simd_coprocessor_error)
zeroentry do_simd_coprocessor_error
+END(simd_coprocessor_error)
ENTRY(device_not_available)
zeroentry math_state_restore
+END(device_not_available)
/* runs on exception stack */
KPROBE_ENTRY(debug)
@@ -911,6 +918,7 @@ KPROBE_ENTRY(debug)
paranoidentry do_debug, DEBUG_STACK
jmp paranoid_exit
CFI_ENDPROC
+END(debug)
.previous .text
/* runs on exception stack */
@@ -961,6 +969,7 @@ paranoid_schedule:
cli
jmp paranoid_userspace
CFI_ENDPROC
+END(nmi)
.previous .text
KPROBE_ENTRY(int3)
@@ -970,22 +979,28 @@ KPROBE_ENTRY(int3)
paranoidentry do_int3, DEBUG_STACK
jmp paranoid_exit
CFI_ENDPROC
+END(int3)
.previous .text
ENTRY(overflow)
zeroentry do_overflow
+END(overflow)
ENTRY(bounds)
zeroentry do_bounds
+END(bounds)
ENTRY(invalid_op)
zeroentry do_invalid_op
+END(invalid_op)
ENTRY(coprocessor_segment_overrun)
zeroentry do_coprocessor_segment_overrun
+END(coprocessor_segment_overrun)
ENTRY(reserved)
zeroentry do_reserved
+END(reserved)
/* runs on exception stack */
ENTRY(double_fault)
@@ -993,12 +1008,15 @@ ENTRY(double_fault)
paranoidentry do_double_fault
jmp paranoid_exit
CFI_ENDPROC
+END(double_fault)
ENTRY(invalid_TSS)
errorentry do_invalid_TSS
+END(invalid_TSS)
ENTRY(segment_not_present)
errorentry do_segment_not_present
+END(segment_not_present)
/* runs on exception stack */
ENTRY(stack_segment)
@@ -1006,19 +1024,24 @@ ENTRY(stack_segment)
paranoidentry do_stack_segment
jmp paranoid_exit
CFI_ENDPROC
+END(stack_segment)
KPROBE_ENTRY(general_protection)
errorentry do_general_protection
+END(general_protection)
.previous .text
ENTRY(alignment_check)
errorentry do_alignment_check
+END(alignment_check)
ENTRY(divide_error)
zeroentry do_divide_error
+END(divide_error)
ENTRY(spurious_interrupt_bug)
zeroentry do_spurious_interrupt_bug
+END(spurious_interrupt_bug)
#ifdef CONFIG_X86_MCE
/* runs on exception stack */
@@ -1029,6 +1052,7 @@ ENTRY(machine_check)
paranoidentry do_machine_check
jmp paranoid_exit
CFI_ENDPROC
+END(machine_check)
#endif
ENTRY(call_softirq)
@@ -1046,3 +1070,37 @@ ENTRY(call_softirq)
decl %gs:pda_irqcount
ret
CFI_ENDPROC
+ENDPROC(call_softirq)
+
+#ifdef CONFIG_STACK_UNWIND
+ENTRY(arch_unwind_init_running)
+ CFI_STARTPROC
+ movq %r15, R15(%rdi)
+ movq %r14, R14(%rdi)
+ xchgq %rsi, %rdx
+ movq %r13, R13(%rdi)
+ movq %r12, R12(%rdi)
+ xorl %eax, %eax
+ movq %rbp, RBP(%rdi)
+ movq %rbx, RBX(%rdi)
+ movq (%rsp), %rcx
+ movq %rax, R11(%rdi)
+ movq %rax, R10(%rdi)
+ movq %rax, R9(%rdi)
+ movq %rax, R8(%rdi)
+ movq %rax, RAX(%rdi)
+ movq %rax, RCX(%rdi)
+ movq %rax, RDX(%rdi)
+ movq %rax, RSI(%rdi)
+ movq %rax, RDI(%rdi)
+ movq %rax, ORIG_RAX(%rdi)
+ movq %rcx, RIP(%rdi)
+ leaq 8(%rsp), %rcx
+ movq $__KERNEL_CS, CS(%rdi)
+ movq %rax, EFLAGS(%rdi)
+ movq %rcx, RSP(%rdi)
+ movq $__KERNEL_DS, SS(%rdi)
+ jmpq *%rdx
+ CFI_ENDPROC
+ENDPROC(arch_unwind_init_running)
+#endif
diff --git a/arch/x86_64/kernel/functionlist b/arch/x86_64/kernel/functionlist
index 2bcebdc3eed..01fa23580c8 100644
--- a/arch/x86_64/kernel/functionlist
+++ b/arch/x86_64/kernel/functionlist
@@ -384,7 +384,6 @@
*(.text.__end_that_request_first)
*(.text.wake_up_bit)
*(.text.unuse_mm)
-*(.text.skb_release_data)
*(.text.shrink_icache_memory)
*(.text.sched_balance_self)
*(.text.__pmd_alloc)
diff --git a/arch/x86_64/kernel/genapic.c b/arch/x86_64/kernel/genapic.c
index 7a64ea18178..8e78a75d186 100644
--- a/arch/x86_64/kernel/genapic.c
+++ b/arch/x86_64/kernel/genapic.c
@@ -8,7 +8,6 @@
* Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
* James Cleverdon.
*/
-#include <linux/config.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
diff --git a/arch/x86_64/kernel/genapic_cluster.c b/arch/x86_64/kernel/genapic_cluster.c
index 43fcf62fef0..3020917546d 100644
--- a/arch/x86_64/kernel/genapic_cluster.c
+++ b/arch/x86_64/kernel/genapic_cluster.c
@@ -9,7 +9,6 @@
* Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
* James Cleverdon.
*/
-#include <linux/config.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
diff --git a/arch/x86_64/kernel/genapic_flat.c b/arch/x86_64/kernel/genapic_flat.c
index 1a2ab825be9..eb86d374813 100644
--- a/arch/x86_64/kernel/genapic_flat.c
+++ b/arch/x86_64/kernel/genapic_flat.c
@@ -8,7 +8,6 @@
* Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
* James Cleverdon.
*/
-#include <linux/config.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
@@ -78,22 +77,29 @@ static void flat_send_IPI_mask(cpumask_t cpumask, int vector)
static void flat_send_IPI_allbutself(int vector)
{
-#ifndef CONFIG_HOTPLUG_CPU
- if (((num_online_cpus()) - 1) >= 1)
- __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL);
+#ifdef CONFIG_HOTPLUG_CPU
+ int hotplug = 1;
#else
- cpumask_t allbutme = cpu_online_map;
+ int hotplug = 0;
+#endif
+ if (hotplug || vector == NMI_VECTOR) {
+ cpumask_t allbutme = cpu_online_map;
- cpu_clear(smp_processor_id(), allbutme);
+ cpu_clear(smp_processor_id(), allbutme);
- if (!cpus_empty(allbutme))
- flat_send_IPI_mask(allbutme, vector);
-#endif
+ if (!cpus_empty(allbutme))
+ flat_send_IPI_mask(allbutme, vector);
+ } else if (num_online_cpus() > 1) {
+ __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL);
+ }
}
static void flat_send_IPI_all(int vector)
{
- __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
+ if (vector == NMI_VECTOR)
+ flat_send_IPI_mask(cpu_online_map, vector);
+ else
+ __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
}
static int flat_apic_id_registered(void)
@@ -108,10 +114,7 @@ static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask)
static unsigned int phys_pkg_id(int index_msb)
{
- u32 ebx;
-
- ebx = cpuid_ebx(1);
- return ((ebx >> 24) & 0xFF) >> index_msb;
+ return hard_smp_processor_id() >> index_msb;
}
struct genapic apic_flat = {
diff --git a/arch/x86_64/kernel/head64.c b/arch/x86_64/kernel/head64.c
index cea20a66c15..e6a71c9556d 100644
--- a/arch/x86_64/kernel/head64.c
+++ b/arch/x86_64/kernel/head64.c
@@ -2,8 +2,6 @@
* linux/arch/x86_64/kernel/head64.c -- prepare to run common code
*
* Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
- *
- * $Id: head64.c,v 1.22 2001/07/06 14:28:20 ak Exp $
*/
#include <linux/init.h>
diff --git a/arch/x86_64/kernel/i387.c b/arch/x86_64/kernel/i387.c
index a5d7e16b928..3aa1e9bb781 100644
--- a/arch/x86_64/kernel/i387.c
+++ b/arch/x86_64/kernel/i387.c
@@ -14,7 +14,6 @@
* the 64bit user space sees a FXSAVE frame directly.
*/
-#include <linux/config.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/processor.h>
@@ -24,7 +23,7 @@
#include <asm/ptrace.h>
#include <asm/uaccess.h>
-unsigned int mxcsr_feature_mask = 0xffffffff;
+unsigned int mxcsr_feature_mask __read_mostly = 0xffffffff;
void mxcsr_feature_mask_init(void)
{
diff --git a/arch/x86_64/kernel/i8259.c b/arch/x86_64/kernel/i8259.c
index 5ecd34ab8c2..0434b1f8e3d 100644
--- a/arch/x86_64/kernel/i8259.c
+++ b/arch/x86_64/kernel/i8259.c
@@ -1,5 +1,4 @@
#include <linux/linkage.h>
-#include <linux/config.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
@@ -44,11 +43,11 @@
BI(x,8) BI(x,9) BI(x,a) BI(x,b) \
BI(x,c) BI(x,d) BI(x,e) BI(x,f)
-#define BUILD_14_IRQS(x) \
+#define BUILD_15_IRQS(x) \
BI(x,0) BI(x,1) BI(x,2) BI(x,3) \
BI(x,4) BI(x,5) BI(x,6) BI(x,7) \
BI(x,8) BI(x,9) BI(x,a) BI(x,b) \
- BI(x,c) BI(x,d)
+ BI(x,c) BI(x,d) BI(x,e)
/*
* ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
@@ -73,13 +72,13 @@ BUILD_16_IRQS(0x8) BUILD_16_IRQS(0x9) BUILD_16_IRQS(0xa) BUILD_16_IRQS(0xb)
BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd)
#ifdef CONFIG_PCI_MSI
- BUILD_14_IRQS(0xe)
+ BUILD_15_IRQS(0xe)
#endif
#endif
#undef BUILD_16_IRQS
-#undef BUILD_14_IRQS
+#undef BUILD_15_IRQS
#undef BI
@@ -92,11 +91,11 @@ BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd)
IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \
IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f)
-#define IRQLIST_14(x) \
+#define IRQLIST_15(x) \
IRQ(x,0), IRQ(x,1), IRQ(x,2), IRQ(x,3), \
IRQ(x,4), IRQ(x,5), IRQ(x,6), IRQ(x,7), \
IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \
- IRQ(x,c), IRQ(x,d)
+ IRQ(x,c), IRQ(x,d), IRQ(x,e)
void (*interrupt[NR_IRQS])(void) = {
IRQLIST_16(0x0),
@@ -108,7 +107,7 @@ void (*interrupt[NR_IRQS])(void) = {
IRQLIST_16(0xc), IRQLIST_16(0xd)
#ifdef CONFIG_PCI_MSI
- , IRQLIST_14(0xe)
+ , IRQLIST_15(0xe)
#endif
#endif
@@ -235,7 +234,7 @@ void make_8259A_irq(unsigned int irq)
{
disable_irq_nosync(irq);
io_apic_irqs &= ~(1<<irq);
- irq_desc[irq].handler = &i8259A_irq_type;
+ irq_desc[irq].chip = &i8259A_irq_type;
enable_irq(irq);
}
@@ -278,7 +277,7 @@ static void mask_and_ack_8259A(unsigned int irq)
* Lightweight spurious IRQ detection. We do not want
* to overdo spurious IRQ handling - it's usually a sign
* of hardware problems, so we only do the checks we can
- * do without slowing down good hardware unnecesserily.
+ * do without slowing down good hardware unnecessarily.
*
* Note that IRQ7 and IRQ15 (the two spurious IRQs
* usually resulting from the 8259A-1|2 PICs) occur
@@ -468,12 +467,12 @@ void __init init_ISA_irqs (void)
/*
* 16 old-style INTA-cycle interrupts:
*/
- irq_desc[i].handler = &i8259A_irq_type;
+ irq_desc[i].chip = &i8259A_irq_type;
} else {
/*
* 'high' PCI IRQs filled in on demand
*/
- irq_desc[i].handler = &no_irq_type;
+ irq_desc[i].chip = &no_irq_type;
}
}
}
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 9cc7031b715..924a4a33295 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -25,7 +25,6 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
-#include <linux/config.h>
#include <linux/smp_lock.h>
#include <linux/mc146818rtc.h>
#include <linux/acpi.h>
@@ -41,6 +40,7 @@
#include <asm/mach_apic.h>
#include <asm/acpi.h>
#include <asm/dma.h>
+#include <asm/nmi.h>
#define __apicdebuginit __init
@@ -56,6 +56,7 @@ int timer_over_8254 __initdata = 0;
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
static DEFINE_SPINLOCK(ioapic_lock);
+static DEFINE_SPINLOCK(vector_lock);
/*
* # of IRQ routing registers
@@ -317,7 +318,7 @@ void __init check_ioapic(void)
vendor &= 0xffff;
switch (vendor) {
case PCI_VENDOR_ID_VIA:
-#ifdef CONFIG_GART_IOMMU
+#ifdef CONFIG_IOMMU
if ((end_pfn > MAX_DMA32_PFN ||
force_iommu) &&
!iommu_aperture_allowed) {
@@ -834,10 +835,17 @@ u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
int assign_irq_vector(int irq)
{
static int current_vector = FIRST_DEVICE_VECTOR, offset = 0;
+ unsigned long flags;
+ int vector;
BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
- if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0)
+
+ spin_lock_irqsave(&vector_lock, flags);
+
+ if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
+ spin_unlock_irqrestore(&vector_lock, flags);
return IO_APIC_VECTOR(irq);
+ }
next:
current_vector += 8;
if (current_vector == IA32_SYSCALL_VECTOR)
@@ -849,11 +857,14 @@ next:
current_vector = FIRST_DEVICE_VECTOR + offset;
}
- vector_irq[current_vector] = irq;
+ vector = current_vector;
+ vector_irq[vector] = irq;
if (irq != AUTO_ASSIGN)
- IO_APIC_VECTOR(irq) = current_vector;
+ IO_APIC_VECTOR(irq) = vector;
- return current_vector;
+ spin_unlock_irqrestore(&vector_lock, flags);
+
+ return vector;
}
extern void (*interrupt[NR_IRQS])(void);
@@ -864,23 +875,18 @@ static struct hw_interrupt_type ioapic_edge_type;
#define IOAPIC_EDGE 0
#define IOAPIC_LEVEL 1
-static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger)
+static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
{
- if (use_pci_vector() && !platform_legacy_irq(irq)) {
- if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
- trigger == IOAPIC_LEVEL)
- irq_desc[vector].handler = &ioapic_level_type;
- else
- irq_desc[vector].handler = &ioapic_edge_type;
- set_intr_gate(vector, interrupt[vector]);
- } else {
- if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
- trigger == IOAPIC_LEVEL)
- irq_desc[irq].handler = &ioapic_level_type;
- else
- irq_desc[irq].handler = &ioapic_edge_type;
- set_intr_gate(vector, interrupt[irq]);
- }
+ unsigned idx;
+
+ idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
+
+ if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
+ trigger == IOAPIC_LEVEL)
+ irq_desc[idx].chip = &ioapic_level_type;
+ else
+ irq_desc[idx].chip = &ioapic_edge_type;
+ set_intr_gate(vector, interrupt[idx]);
}
static void __init setup_IO_APIC_irqs(void)
@@ -981,7 +987,7 @@ static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, in
* The timer IRQ doesn't have to know that behind the
* scene we have a 8259A-master in AEOI mode ...
*/
- irq_desc[0].handler = &ioapic_edge_type;
+ irq_desc[0].chip = &ioapic_edge_type;
/*
* Add it to the IO-APIC irq-routing table:
@@ -1611,6 +1617,13 @@ static void set_ioapic_affinity_vector (unsigned int vector,
#endif // CONFIG_SMP
#endif // CONFIG_PCI_MSI
+static int ioapic_retrigger(unsigned int irq)
+{
+ send_IPI_self(IO_APIC_VECTOR(irq));
+
+ return 1;
+}
+
/*
* Level and edge triggered IO-APIC interrupts need different handling,
* so we use two separate IRQ descriptors. Edge triggered IRQs can be
@@ -1631,6 +1644,7 @@ static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
#ifdef CONFIG_SMP
.set_affinity = set_ioapic_affinity,
#endif
+ .retrigger = ioapic_retrigger,
};
static struct hw_interrupt_type ioapic_level_type __read_mostly = {
@@ -1644,6 +1658,7 @@ static struct hw_interrupt_type ioapic_level_type __read_mostly = {
#ifdef CONFIG_SMP
.set_affinity = set_ioapic_affinity,
#endif
+ .retrigger = ioapic_retrigger,
};
static inline void init_IO_APIC_traps(void)
@@ -1678,7 +1693,7 @@ static inline void init_IO_APIC_traps(void)
make_8259A_irq(irq);
else
/* Strange. Oh, well.. */
- irq_desc[irq].handler = &no_irq_type;
+ irq_desc[irq].chip = &no_irq_type;
}
}
}
@@ -1895,7 +1910,7 @@ static inline void check_timer(void)
apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
disable_8259A_irq(0);
- irq_desc[0].handler = &lapic_irq_type;
+ irq_desc[0].chip = &lapic_irq_type;
apic_write(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
enable_8259A_irq(0);
diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c
index d8bd0b345b1..a1f1df5f7bf 100644
--- a/arch/x86_64/kernel/irq.c
+++ b/arch/x86_64/kernel/irq.c
@@ -26,6 +26,30 @@ atomic_t irq_mis_count;
#endif
#endif
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+/*
+ * Probabilistic stack overflow check:
+ *
+ * Only check the stack in process context, because everything else
+ * runs on the big interrupt stacks. Checking reliably is too expensive,
+ * so we just check from interrupts.
+ */
+static inline void stack_overflow_check(struct pt_regs *regs)
+{
+ u64 curbase = (u64) current->thread_info;
+ static unsigned long warned = -60*HZ;
+
+ if (regs->rsp >= curbase && regs->rsp <= curbase + THREAD_SIZE &&
+ regs->rsp < curbase + sizeof(struct thread_info) + 128 &&
+ time_after(jiffies, warned + 60*HZ)) {
+ printk("do_IRQ: %s near stack overflow (cur:%Lx,rsp:%lx)\n",
+ current->comm, curbase, regs->rsp);
+ show_stack(NULL,NULL);
+ warned = jiffies;
+ }
+}
+#endif
+
/*
* Generic, controller-independent functions:
*/
@@ -39,7 +63,7 @@ int show_interrupts(struct seq_file *p, void *v)
if (i == 0) {
seq_printf(p, " ");
for_each_online_cpu(j)
- seq_printf(p, "CPU%d ",j);
+ seq_printf(p, "CPU%-8d",j);
seq_putc(p, '\n');
}
@@ -55,7 +79,7 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
- seq_printf(p, " %14s", irq_desc[i].handler->typename);
+ seq_printf(p, " %14s", irq_desc[i].chip->typename);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
@@ -91,12 +115,20 @@ skip:
*/
asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
{
- /* high bits used in ret_from_ code */
- unsigned irq = regs->orig_rax & 0xff;
+ /* high bit used in ret_from_ code */
+ unsigned irq = ~regs->orig_rax;
+
+ if (unlikely(irq >= NR_IRQS)) {
+ printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
+ __FUNCTION__, irq);
+ BUG();
+ }
exit_idle();
irq_enter();
-
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+ stack_overflow_check(regs);
+#endif
__do_IRQ(irq, regs);
irq_exit();
@@ -114,13 +146,13 @@ void fixup_irqs(cpumask_t map)
if (irq == 2)
continue;
- cpus_and(mask, irq_affinity[irq], map);
+ cpus_and(mask, irq_desc[irq].affinity, map);
if (any_online_cpu(mask) == NR_CPUS) {
printk("Breaking affinity for irq %i\n", irq);
mask = map;
}
- if (irq_desc[irq].handler->set_affinity)
- irq_desc[irq].handler->set_affinity(irq, mask);
+ if (irq_desc[irq].chip->set_affinity)
+ irq_desc[irq].chip->set_affinity(irq, mask);
else if (irq_desc[irq].action && !(warned++))
printk("Cannot set affinity for irq %i\n", irq);
}
diff --git a/arch/x86_64/kernel/k8.c b/arch/x86_64/kernel/k8.c
new file mode 100644
index 00000000000..6416682d33d
--- /dev/null
+++ b/arch/x86_64/kernel/k8.c
@@ -0,0 +1,118 @@
+/*
+ * Shared support code for AMD K8 northbridges and derivates.
+ * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
+ */
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <asm/k8.h>
+
+int num_k8_northbridges;
+EXPORT_SYMBOL(num_k8_northbridges);
+
+static u32 *flush_words;
+
+struct pci_device_id k8_nb_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) },
+ {}
+};
+EXPORT_SYMBOL(k8_nb_ids);
+
+struct pci_dev **k8_northbridges;
+EXPORT_SYMBOL(k8_northbridges);
+
+static struct pci_dev *next_k8_northbridge(struct pci_dev *dev)
+{
+ do {
+ dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
+ if (!dev)
+ break;
+ } while (!pci_match_id(&k8_nb_ids[0], dev));
+ return dev;
+}
+
+int cache_k8_northbridges(void)
+{
+ int i;
+ struct pci_dev *dev;
+ if (num_k8_northbridges)
+ return 0;
+
+ num_k8_northbridges = 0;
+ dev = NULL;
+ while ((dev = next_k8_northbridge(dev)) != NULL)
+ num_k8_northbridges++;
+
+ k8_northbridges = kmalloc((num_k8_northbridges + 1) * sizeof(void *),
+ GFP_KERNEL);
+ if (!k8_northbridges)
+ return -ENOMEM;
+
+ flush_words = kmalloc(num_k8_northbridges * sizeof(u32), GFP_KERNEL);
+ if (!flush_words) {
+ kfree(k8_northbridges);
+ return -ENOMEM;
+ }
+
+ dev = NULL;
+ i = 0;
+ while ((dev = next_k8_northbridge(dev)) != NULL) {
+ k8_northbridges[i++] = dev;
+ pci_read_config_dword(dev, 0x9c, &flush_words[i]);
+ }
+ k8_northbridges[i] = NULL;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cache_k8_northbridges);
+
+/* Ignores subdevice/subvendor but as far as I can figure out
+ they're useless anyways */
+int __init early_is_k8_nb(u32 device)
+{
+ struct pci_device_id *id;
+ u32 vendor = device & 0xffff;
+ device >>= 16;
+ for (id = k8_nb_ids; id->vendor; id++)
+ if (vendor == id->vendor && device == id->device)
+ return 1;
+ return 0;
+}
+
+void k8_flush_garts(void)
+{
+ int flushed, i;
+ unsigned long flags;
+ static DEFINE_SPINLOCK(gart_lock);
+
+ /* Avoid races between AGP and IOMMU. In theory it's not needed
+ but I'm not sure if the hardware won't lose flush requests
+ when another is pending. This whole thing is so expensive anyways
+ that it doesn't matter to serialize more. -AK */
+ spin_lock_irqsave(&gart_lock, flags);
+ flushed = 0;
+ for (i = 0; i < num_k8_northbridges; i++) {
+ pci_write_config_dword(k8_northbridges[i], 0x9c,
+ flush_words[i]|1);
+ flushed++;
+ }
+ for (i = 0; i < num_k8_northbridges; i++) {
+ u32 w;
+ /* Make sure the hardware actually executed the flush*/
+ for (;;) {
+ pci_read_config_dword(k8_northbridges[i],
+ 0x9c, &w);
+ if (!(w & 1))
+ break;
+ cpu_relax();
+ }
+ }
+ spin_unlock_irqrestore(&gart_lock, flags);
+ if (!flushed)
+ printk("nothing to flush?\n");
+}
+EXPORT_SYMBOL_GPL(k8_flush_garts);
+
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index fa1d19ca700..ffc73ac7248 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -31,7 +31,6 @@
* Added function return probes functionality
*/
-#include <linux/config.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
diff --git a/arch/x86_64/kernel/machine_kexec.c b/arch/x86_64/kernel/machine_kexec.c
index 25ac8a3faae..83fb24a0282 100644
--- a/arch/x86_64/kernel/machine_kexec.c
+++ b/arch/x86_64/kernel/machine_kexec.c
@@ -149,8 +149,8 @@ typedef NORET_TYPE void (*relocate_new_kernel_t)(unsigned long indirection_page,
unsigned long start_address,
unsigned long pgtable) ATTRIB_NORET;
-const extern unsigned char relocate_new_kernel[];
-const extern unsigned long relocate_new_kernel_size;
+extern const unsigned char relocate_new_kernel[];
+extern const unsigned long relocate_new_kernel_size;
int machine_kexec_prepare(struct kimage *image)
{
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index c69fc43cee7..88845674c66 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -562,7 +562,7 @@ static struct sysdev_class mce_sysclass = {
set_kset_name("machinecheck"),
};
-static DEFINE_PER_CPU(struct sys_device, device_mce);
+DEFINE_PER_CPU(struct sys_device, device_mce);
/* Why are there no generic functions for this? */
#define ACCESSOR(name, var, start) \
@@ -629,7 +629,7 @@ static __cpuinit void mce_remove_device(unsigned int cpu)
#endif
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
-static int
+static __cpuinit int
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
@@ -647,7 +647,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
return NOTIFY_OK;
}
-static struct notifier_block mce_cpu_notifier = {
+static struct notifier_block __cpuinitdata mce_cpu_notifier = {
.notifier_call = mce_cpu_callback,
};
diff --git a/arch/x86_64/kernel/mce_amd.c b/arch/x86_64/kernel/mce_amd.c
index d13b241ad09..335200aa273 100644
--- a/arch/x86_64/kernel/mce_amd.c
+++ b/arch/x86_64/kernel/mce_amd.c
@@ -1,5 +1,5 @@
/*
- * (c) 2005 Advanced Micro Devices, Inc.
+ * (c) 2005, 2006 Advanced Micro Devices, Inc.
* Your use of this code is subject to the terms and conditions of the
* GNU general public license version 2. See "COPYING" or
* http://www.gnu.org/licenses/gpl.html
@@ -8,9 +8,10 @@
*
* Support : jacob.shin@amd.com
*
- * MC4_MISC0 DRAM ECC Error Threshold available under AMD K8 Rev F.
- * MC4_MISC0 exists per physical processor.
+ * April 2006
+ * - added support for AMD Family 0x10 processors
*
+ * All MC4_MISCi registers are shared between multi-cores
*/
#include <linux/cpu.h>
@@ -29,32 +30,45 @@
#include <asm/percpu.h>
#include <asm/idle.h>
-#define PFX "mce_threshold: "
-#define VERSION "version 1.00.9"
-#define NR_BANKS 5
-#define THRESHOLD_MAX 0xFFF
-#define INT_TYPE_APIC 0x00020000
-#define MASK_VALID_HI 0x80000000
-#define MASK_LVTOFF_HI 0x00F00000
-#define MASK_COUNT_EN_HI 0x00080000
-#define MASK_INT_TYPE_HI 0x00060000
-#define MASK_OVERFLOW_HI 0x00010000
+#define PFX "mce_threshold: "
+#define VERSION "version 1.1.1"
+#define NR_BANKS 6
+#define NR_BLOCKS 9
+#define THRESHOLD_MAX 0xFFF
+#define INT_TYPE_APIC 0x00020000
+#define MASK_VALID_HI 0x80000000
+#define MASK_LVTOFF_HI 0x00F00000
+#define MASK_COUNT_EN_HI 0x00080000
+#define MASK_INT_TYPE_HI 0x00060000
+#define MASK_OVERFLOW_HI 0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
-#define MASK_OVERFLOW 0x0001000000000000L
+#define MASK_BLKPTR_LO 0xFF000000
+#define MCG_XBLK_ADDR 0xC0000400
-struct threshold_bank {
+struct threshold_block {
+ unsigned int block;
+ unsigned int bank;
unsigned int cpu;
- u8 bank;
- u8 interrupt_enable;
+ u32 address;
+ u16 interrupt_enable;
u16 threshold_limit;
struct kobject kobj;
+ struct list_head miscj;
};
-static struct threshold_bank threshold_defaults = {
+/* defaults used early on boot */
+static struct threshold_block threshold_defaults = {
.interrupt_enable = 0,
.threshold_limit = THRESHOLD_MAX,
};
+struct threshold_bank {
+ struct kobject kobj;
+ struct threshold_block *blocks;
+ cpumask_t cpus;
+};
+static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
+
#ifdef CONFIG_SMP
static unsigned char shared_bank[NR_BANKS] = {
0, 0, 0, 0, 1
@@ -68,12 +82,12 @@ static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */
*/
/* must be called with correct cpu affinity */
-static void threshold_restart_bank(struct threshold_bank *b,
+static void threshold_restart_bank(struct threshold_block *b,
int reset, u16 old_limit)
{
u32 mci_misc_hi, mci_misc_lo;
- rdmsr(MSR_IA32_MC0_MISC + b->bank * 4, mci_misc_lo, mci_misc_hi);
+ rdmsr(b->address, mci_misc_lo, mci_misc_hi);
if (b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX))
reset = 1; /* limit cannot be lower than err count */
@@ -94,35 +108,57 @@ static void threshold_restart_bank(struct threshold_bank *b,
(mci_misc_hi &= ~MASK_INT_TYPE_HI);
mci_misc_hi |= MASK_COUNT_EN_HI;
- wrmsr(MSR_IA32_MC0_MISC + b->bank * 4, mci_misc_lo, mci_misc_hi);
+ wrmsr(b->address, mci_misc_lo, mci_misc_hi);
}
+/* cpu init entry point, called from mce.c with preempt off */
void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
{
- int bank;
- u32 mci_misc_lo, mci_misc_hi;
+ unsigned int bank, block;
unsigned int cpu = smp_processor_id();
+ u32 low = 0, high = 0, address = 0;
for (bank = 0; bank < NR_BANKS; ++bank) {
- rdmsr(MSR_IA32_MC0_MISC + bank * 4, mci_misc_lo, mci_misc_hi);
+ for (block = 0; block < NR_BLOCKS; ++block) {
+ if (block == 0)
+ address = MSR_IA32_MC0_MISC + bank * 4;
+ else if (block == 1)
+ address = MCG_XBLK_ADDR
+ + ((low & MASK_BLKPTR_LO) >> 21);
+ else
+ ++address;
+
+ if (rdmsr_safe(address, &low, &high))
+ continue;
- /* !valid, !counter present, bios locked */
- if (!(mci_misc_hi & MASK_VALID_HI) ||
- !(mci_misc_hi & MASK_VALID_HI >> 1) ||
- (mci_misc_hi & MASK_VALID_HI >> 2))
- continue;
+ if (!(high & MASK_VALID_HI)) {
+ if (block)
+ continue;
+ else
+ break;
+ }
- per_cpu(bank_map, cpu) |= (1 << bank);
+ if (!(high & MASK_VALID_HI >> 1) ||
+ (high & MASK_VALID_HI >> 2))
+ continue;
+ if (!block)
+ per_cpu(bank_map, cpu) |= (1 << bank);
#ifdef CONFIG_SMP
- if (shared_bank[bank] && cpu_core_id[cpu])
- continue;
+ if (shared_bank[bank] && c->cpu_core_id)
+ break;
#endif
+ high &= ~MASK_LVTOFF_HI;
+ high |= K8_APIC_EXT_LVT_ENTRY_THRESHOLD << 20;
+ wrmsr(address, low, high);
- setup_threshold_lvt((mci_misc_hi & MASK_LVTOFF_HI) >> 20);
- threshold_defaults.cpu = cpu;
- threshold_defaults.bank = bank;
- threshold_restart_bank(&threshold_defaults, 0, 0);
+ setup_APIC_extened_lvt(K8_APIC_EXT_LVT_ENTRY_THRESHOLD,
+ THRESHOLD_APIC_VECTOR,
+ K8_APIC_EXT_INT_MSG_FIX, 0);
+
+ threshold_defaults.address = address;
+ threshold_restart_bank(&threshold_defaults, 0, 0);
+ }
}
}
@@ -137,8 +173,9 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
*/
asmlinkage void mce_threshold_interrupt(void)
{
- int bank;
+ unsigned int bank, block;
struct mce m;
+ u32 low = 0, high = 0, address = 0;
ack_APIC_irq();
exit_idle();
@@ -150,15 +187,42 @@ asmlinkage void mce_threshold_interrupt(void)
/* assume first bank caused it */
for (bank = 0; bank < NR_BANKS; ++bank) {
- m.bank = MCE_THRESHOLD_BASE + bank;
- rdmsrl(MSR_IA32_MC0_MISC + bank * 4, m.misc);
+ for (block = 0; block < NR_BLOCKS; ++block) {
+ if (block == 0)
+ address = MSR_IA32_MC0_MISC + bank * 4;
+ else if (block == 1)
+ address = MCG_XBLK_ADDR
+ + ((low & MASK_BLKPTR_LO) >> 21);
+ else
+ ++address;
+
+ if (rdmsr_safe(address, &low, &high))
+ continue;
- if (m.misc & MASK_OVERFLOW) {
- mce_log(&m);
- goto out;
+ if (!(high & MASK_VALID_HI)) {
+ if (block)
+ continue;
+ else
+ break;
+ }
+
+ if (!(high & MASK_VALID_HI >> 1) ||
+ (high & MASK_VALID_HI >> 2))
+ continue;
+
+ if (high & MASK_OVERFLOW_HI) {
+ rdmsrl(address, m.misc);
+ rdmsrl(MSR_IA32_MC0_STATUS + bank * 4,
+ m.status);
+ m.bank = K8_MCE_THRESHOLD_BASE
+ + bank * NR_BLOCKS
+ + block;
+ mce_log(&m);
+ goto out;
+ }
}
}
- out:
+out:
irq_exit();
}
@@ -166,20 +230,12 @@ asmlinkage void mce_threshold_interrupt(void)
* Sysfs Interface
*/
-static struct sysdev_class threshold_sysclass = {
- set_kset_name("threshold"),
-};
-
-static DEFINE_PER_CPU(struct sys_device, device_threshold);
-
struct threshold_attr {
- struct attribute attr;
- ssize_t(*show) (struct threshold_bank *, char *);
- ssize_t(*store) (struct threshold_bank *, const char *, size_t count);
+ struct attribute attr;
+ ssize_t(*show) (struct threshold_block *, char *);
+ ssize_t(*store) (struct threshold_block *, const char *, size_t count);
};
-static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
-
static cpumask_t affinity_set(unsigned int cpu)
{
cpumask_t oldmask = current->cpus_allowed;
@@ -194,15 +250,15 @@ static void affinity_restore(cpumask_t oldmask)
set_cpus_allowed(current, oldmask);
}
-#define SHOW_FIELDS(name) \
- static ssize_t show_ ## name(struct threshold_bank * b, char *buf) \
- { \
- return sprintf(buf, "%lx\n", (unsigned long) b->name); \
- }
+#define SHOW_FIELDS(name) \
+static ssize_t show_ ## name(struct threshold_block * b, char *buf) \
+{ \
+ return sprintf(buf, "%lx\n", (unsigned long) b->name); \
+}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)
-static ssize_t store_interrupt_enable(struct threshold_bank *b,
+static ssize_t store_interrupt_enable(struct threshold_block *b,
const char *buf, size_t count)
{
char *end;
@@ -219,7 +275,7 @@ static ssize_t store_interrupt_enable(struct threshold_bank *b,
return end - buf;
}
-static ssize_t store_threshold_limit(struct threshold_bank *b,
+static ssize_t store_threshold_limit(struct threshold_block *b,
const char *buf, size_t count)
{
char *end;
@@ -242,18 +298,18 @@ static ssize_t store_threshold_limit(struct threshold_bank *b,
return end - buf;
}
-static ssize_t show_error_count(struct threshold_bank *b, char *buf)
+static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
u32 high, low;
cpumask_t oldmask;
oldmask = affinity_set(b->cpu);
- rdmsr(MSR_IA32_MC0_MISC + b->bank * 4, low, high); /* ignore low 32 */
+ rdmsr(b->address, low, high);
affinity_restore(oldmask);
return sprintf(buf, "%x\n",
(high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit));
}
-static ssize_t store_error_count(struct threshold_bank *b,
+static ssize_t store_error_count(struct threshold_block *b,
const char *buf, size_t count)
{
cpumask_t oldmask;
@@ -269,13 +325,13 @@ static ssize_t store_error_count(struct threshold_bank *b,
.store = _store, \
};
-#define ATTR_FIELDS(name) \
- static struct threshold_attr name = \
+#define RW_ATTR(name) \
+static struct threshold_attr name = \
THRESHOLD_ATTR(name, 0644, show_## name, store_## name)
-ATTR_FIELDS(interrupt_enable);
-ATTR_FIELDS(threshold_limit);
-ATTR_FIELDS(error_count);
+RW_ATTR(interrupt_enable);
+RW_ATTR(threshold_limit);
+RW_ATTR(error_count);
static struct attribute *default_attrs[] = {
&interrupt_enable.attr,
@@ -284,12 +340,12 @@ static struct attribute *default_attrs[] = {
NULL
};
-#define to_bank(k) container_of(k,struct threshold_bank,kobj)
-#define to_attr(a) container_of(a,struct threshold_attr,attr)
+#define to_block(k) container_of(k, struct threshold_block, kobj)
+#define to_attr(a) container_of(a, struct threshold_attr, attr)
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
- struct threshold_bank *b = to_bank(kobj);
+ struct threshold_block *b = to_block(kobj);
struct threshold_attr *a = to_attr(attr);
ssize_t ret;
ret = a->show ? a->show(b, buf) : -EIO;
@@ -299,7 +355,7 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
static ssize_t store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
- struct threshold_bank *b = to_bank(kobj);
+ struct threshold_block *b = to_block(kobj);
struct threshold_attr *a = to_attr(attr);
ssize_t ret;
ret = a->store ? a->store(b, buf, count) : -EIO;
@@ -316,69 +372,174 @@ static struct kobj_type threshold_ktype = {
.default_attrs = default_attrs,
};
+static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
+ unsigned int bank,
+ unsigned int block,
+ u32 address)
+{
+ int err;
+ u32 low, high;
+ struct threshold_block *b = NULL;
+
+ if ((bank >= NR_BANKS) || (block >= NR_BLOCKS))
+ return 0;
+
+ if (rdmsr_safe(address, &low, &high))
+ goto recurse;
+
+ if (!(high & MASK_VALID_HI)) {
+ if (block)
+ goto recurse;
+ else
+ return 0;
+ }
+
+ if (!(high & MASK_VALID_HI >> 1) ||
+ (high & MASK_VALID_HI >> 2))
+ goto recurse;
+
+ b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+ memset(b, 0, sizeof(struct threshold_block));
+
+ b->block = block;
+ b->bank = bank;
+ b->cpu = cpu;
+ b->address = address;
+ b->interrupt_enable = 0;
+ b->threshold_limit = THRESHOLD_MAX;
+
+ INIT_LIST_HEAD(&b->miscj);
+
+ if (per_cpu(threshold_banks, cpu)[bank]->blocks)
+ list_add(&b->miscj,
+ &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
+ else
+ per_cpu(threshold_banks, cpu)[bank]->blocks = b;
+
+ kobject_set_name(&b->kobj, "misc%i", block);
+ b->kobj.parent = &per_cpu(threshold_banks, cpu)[bank]->kobj;
+ b->kobj.ktype = &threshold_ktype;
+ err = kobject_register(&b->kobj);
+ if (err)
+ goto out_free;
+recurse:
+ if (!block) {
+ address = (low & MASK_BLKPTR_LO) >> 21;
+ if (!address)
+ return 0;
+ address += MCG_XBLK_ADDR;
+ } else
+ ++address;
+
+ err = allocate_threshold_blocks(cpu, bank, ++block, address);
+ if (err)
+ goto out_free;
+
+ return err;
+
+out_free:
+ if (b) {
+ kobject_unregister(&b->kobj);
+ kfree(b);
+ }
+ return err;
+}
+
/* symlinks sibling shared banks to first core. first core owns dir/files. */
-static __cpuinit int threshold_create_bank(unsigned int cpu, int bank)
+static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
- int err = 0;
+ int i, err = 0;
struct threshold_bank *b = NULL;
+ cpumask_t oldmask = CPU_MASK_NONE;
+ char name[32];
+
+ sprintf(name, "threshold_bank%i", bank);
#ifdef CONFIG_SMP
- if (cpu_core_id[cpu] && shared_bank[bank]) { /* symlink */
- char name[16];
- unsigned lcpu = first_cpu(cpu_core_map[cpu]);
- if (cpu_core_id[lcpu])
- goto out; /* first core not up yet */
+ if (cpu_data[cpu].cpu_core_id && shared_bank[bank]) { /* symlink */
+ i = first_cpu(cpu_core_map[cpu]);
+
+ /* first core not up yet */
+ if (cpu_data[i].cpu_core_id)
+ goto out;
+
+ /* already linked */
+ if (per_cpu(threshold_banks, cpu)[bank])
+ goto out;
+
+ b = per_cpu(threshold_banks, i)[bank];
- b = per_cpu(threshold_banks, lcpu)[bank];
if (!b)
goto out;
- sprintf(name, "bank%i", bank);
- err = sysfs_create_link(&per_cpu(device_threshold, cpu).kobj,
+
+ err = sysfs_create_link(&per_cpu(device_mce, cpu).kobj,
&b->kobj, name);
if (err)
goto out;
+
+ b->cpus = cpu_core_map[cpu];
per_cpu(threshold_banks, cpu)[bank] = b;
goto out;
}
#endif
- b = kmalloc(sizeof(struct threshold_bank), GFP_KERNEL);
+ b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
if (!b) {
err = -ENOMEM;
goto out;
}
memset(b, 0, sizeof(struct threshold_bank));
- b->cpu = cpu;
- b->bank = bank;
- b->interrupt_enable = 0;
- b->threshold_limit = THRESHOLD_MAX;
- kobject_set_name(&b->kobj, "bank%i", bank);
- b->kobj.parent = &per_cpu(device_threshold, cpu).kobj;
- b->kobj.ktype = &threshold_ktype;
-
+ kobject_set_name(&b->kobj, "threshold_bank%i", bank);
+ b->kobj.parent = &per_cpu(device_mce, cpu).kobj;
+#ifndef CONFIG_SMP
+ b->cpus = CPU_MASK_ALL;
+#else
+ b->cpus = cpu_core_map[cpu];
+#endif
err = kobject_register(&b->kobj);
- if (err) {
- kfree(b);
- goto out;
- }
+ if (err)
+ goto out_free;
+
per_cpu(threshold_banks, cpu)[bank] = b;
- out:
+
+ oldmask = affinity_set(cpu);
+ err = allocate_threshold_blocks(cpu, bank, 0,
+ MSR_IA32_MC0_MISC + bank * 4);
+ affinity_restore(oldmask);
+
+ if (err)
+ goto out_free;
+
+ for_each_cpu_mask(i, b->cpus) {
+ if (i == cpu)
+ continue;
+
+ err = sysfs_create_link(&per_cpu(device_mce, i).kobj,
+ &b->kobj, name);
+ if (err)
+ goto out;
+
+ per_cpu(threshold_banks, i)[bank] = b;
+ }
+
+ goto out;
+
+out_free:
+ per_cpu(threshold_banks, cpu)[bank] = NULL;
+ kfree(b);
+out:
return err;
}
/* create dir/files for all valid threshold banks */
static __cpuinit int threshold_create_device(unsigned int cpu)
{
- int bank;
+ unsigned int bank;
int err = 0;
- per_cpu(device_threshold, cpu).id = cpu;
- per_cpu(device_threshold, cpu).cls = &threshold_sysclass;
- err = sysdev_register(&per_cpu(device_threshold, cpu));
- if (err)
- goto out;
-
for (bank = 0; bank < NR_BANKS; ++bank) {
if (!(per_cpu(bank_map, cpu) & 1 << bank))
continue;
@@ -386,7 +547,7 @@ static __cpuinit int threshold_create_device(unsigned int cpu)
if (err)
goto out;
}
- out:
+out:
return err;
}
@@ -397,92 +558,85 @@ static __cpuinit int threshold_create_device(unsigned int cpu)
* of shared sysfs dir/files, and rest of the cores will be symlinked to it.
*/
-/* cpu hotplug call removes all symlinks before first core dies */
+static __cpuinit void deallocate_threshold_block(unsigned int cpu,
+ unsigned int bank)
+{
+ struct threshold_block *pos = NULL;
+ struct threshold_block *tmp = NULL;
+ struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];
+
+ if (!head)
+ return;
+
+ list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
+ kobject_unregister(&pos->kobj);
+ list_del(&pos->miscj);
+ kfree(pos);
+ }
+
+ kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
+ per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
+}
+
static __cpuinit void threshold_remove_bank(unsigned int cpu, int bank)
{
+ int i = 0;
struct threshold_bank *b;
- char name[16];
+ char name[32];
b = per_cpu(threshold_banks, cpu)[bank];
+
if (!b)
return;
- if (shared_bank[bank] && atomic_read(&b->kobj.kref.refcount) > 2) {
- sprintf(name, "bank%i", bank);
- sysfs_remove_link(&per_cpu(device_threshold, cpu).kobj, name);
- per_cpu(threshold_banks, cpu)[bank] = NULL;
- } else {
- kobject_unregister(&b->kobj);
- kfree(per_cpu(threshold_banks, cpu)[bank]);
+
+ if (!b->blocks)
+ goto free_out;
+
+ sprintf(name, "threshold_bank%i", bank);
+
+ /* sibling symlink */
+ if (shared_bank[bank] && b->blocks->cpu != cpu) {
+ sysfs_remove_link(&per_cpu(device_mce, cpu).kobj, name);
+ per_cpu(threshold_banks, i)[bank] = NULL;
+ return;
+ }
+
+ /* remove all sibling symlinks before unregistering */
+ for_each_cpu_mask(i, b->cpus) {
+ if (i == cpu)
+ continue;
+
+ sysfs_remove_link(&per_cpu(device_mce, i).kobj, name);
+ per_cpu(threshold_banks, i)[bank] = NULL;
}
+
+ deallocate_threshold_block(cpu, bank);
+
+free_out:
+ kobject_unregister(&b->kobj);
+ kfree(b);
+ per_cpu(threshold_banks, cpu)[bank] = NULL;
}
static __cpuinit void threshold_remove_device(unsigned int cpu)
{
- int bank;
+ unsigned int bank;
for (bank = 0; bank < NR_BANKS; ++bank) {
if (!(per_cpu(bank_map, cpu) & 1 << bank))
continue;
threshold_remove_bank(cpu, bank);
}
- sysdev_unregister(&per_cpu(device_threshold, cpu));
}
-/* link all existing siblings when first core comes up */
-static __cpuinit int threshold_create_symlinks(unsigned int cpu)
-{
- int bank, err = 0;
- unsigned int lcpu = 0;
-
- if (cpu_core_id[cpu])
- return 0;
- for_each_cpu_mask(lcpu, cpu_core_map[cpu]) {
- if (lcpu == cpu)
- continue;
- for (bank = 0; bank < NR_BANKS; ++bank) {
- if (!(per_cpu(bank_map, cpu) & 1 << bank))
- continue;
- if (!shared_bank[bank])
- continue;
- err = threshold_create_bank(lcpu, bank);
- }
- }
- return err;
-}
-
-/* remove all symlinks before first core dies. */
-static __cpuinit void threshold_remove_symlinks(unsigned int cpu)
-{
- int bank;
- unsigned int lcpu = 0;
- if (cpu_core_id[cpu])
- return;
- for_each_cpu_mask(lcpu, cpu_core_map[cpu]) {
- if (lcpu == cpu)
- continue;
- for (bank = 0; bank < NR_BANKS; ++bank) {
- if (!(per_cpu(bank_map, cpu) & 1 << bank))
- continue;
- if (!shared_bank[bank])
- continue;
- threshold_remove_bank(lcpu, bank);
- }
- }
-}
#else /* !CONFIG_HOTPLUG_CPU */
-static __cpuinit void threshold_create_symlinks(unsigned int cpu)
-{
-}
-static __cpuinit void threshold_remove_symlinks(unsigned int cpu)
-{
-}
static void threshold_remove_device(unsigned int cpu)
{
}
#endif
/* get notified when a cpu comes on/off */
-static int threshold_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit threshold_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
/* cpu was unsigned int to begin with */
@@ -494,13 +648,6 @@ static int threshold_cpu_callback(struct notifier_block *nfb,
switch (action) {
case CPU_ONLINE:
threshold_create_device(cpu);
- threshold_create_symlinks(cpu);
- break;
- case CPU_DOWN_PREPARE:
- threshold_remove_symlinks(cpu);
- break;
- case CPU_DOWN_FAILED:
- threshold_create_symlinks(cpu);
break;
case CPU_DEAD:
threshold_remove_device(cpu);
@@ -512,29 +659,22 @@ static int threshold_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block threshold_cpu_notifier = {
+static struct notifier_block threshold_cpu_notifier __cpuinitdata = {
.notifier_call = threshold_cpu_callback,
};
static __init int threshold_init_device(void)
{
- int err;
- int lcpu = 0;
-
- err = sysdev_class_register(&threshold_sysclass);
- if (err)
- goto out;
+ unsigned lcpu = 0;
/* to hit CPUs online before the notifier is up */
for_each_online_cpu(lcpu) {
- err = threshold_create_device(lcpu);
+ int err = threshold_create_device(lcpu);
if (err)
- goto out;
+ return err;
}
register_cpu_notifier(&threshold_cpu_notifier);
-
- out:
- return err;
+ return 0;
}
device_initcall(threshold_init_device);
diff --git a/arch/x86_64/kernel/module.c b/arch/x86_64/kernel/module.c
index bac195c74bc..9d0958ff547 100644
--- a/arch/x86_64/kernel/module.c
+++ b/arch/x86_64/kernel/module.c
@@ -145,26 +145,38 @@ int apply_relocate(Elf_Shdr *sechdrs,
return -ENOSYS;
}
-extern void apply_alternatives(void *start, void *end);
-
int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *me)
+ const Elf_Shdr *sechdrs,
+ struct module *me)
{
- const Elf_Shdr *s;
+ const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL;
char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
- /* look for .altinstructions to patch */
- for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
- void *seg;
- if (strcmp(".altinstructions", secstrings + s->sh_name))
- continue;
- seg = (void *)s->sh_addr;
- apply_alternatives(seg, seg + s->sh_size);
- }
+ for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
+ if (!strcmp(".text", secstrings + s->sh_name))
+ text = s;
+ if (!strcmp(".altinstructions", secstrings + s->sh_name))
+ alt = s;
+ if (!strcmp(".smp_locks", secstrings + s->sh_name))
+ locks= s;
+ }
+
+ if (alt) {
+ /* patch .altinstructions */
+ void *aseg = (void *)alt->sh_addr;
+ apply_alternatives(aseg, aseg + alt->sh_size);
+ }
+ if (locks && text) {
+ void *lseg = (void *)locks->sh_addr;
+ void *tseg = (void *)text->sh_addr;
+ alternatives_smp_module_add(me, me->name,
+ lseg, lseg + locks->sh_size,
+ tseg, tseg + text->sh_size);
+ }
return 0;
}
void module_arch_cleanup(struct module *mod)
{
+ alternatives_smp_module_del(mod);
}
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index 083da7e606b..a1ab4197f8a 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -16,7 +16,6 @@
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
-#include <linux/config.h>
#include <linux/bootmem.h>
#include <linux/smp_lock.h>
#include <linux/kernel_stat.h>
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 4e6357fe0ec..476c1472fc0 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -12,14 +12,9 @@
* Mikael Pettersson : PM converted to driver model. Disable/enable API.
*/
-#include <linux/config.h>
#include <linux/mm.h>
#include <linux/delay.h>
-#include <linux/bootmem.h>
-#include <linux/smp_lock.h>
#include <linux/interrupt.h>
-#include <linux/mc146818rtc.h>
-#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/nmi.h>
@@ -27,14 +22,11 @@
#include <linux/kprobes.h>
#include <asm/smp.h>
-#include <asm/mtrr.h>
-#include <asm/mpspec.h>
#include <asm/nmi.h>
-#include <asm/msr.h>
#include <asm/proto.h>
#include <asm/kdebug.h>
-#include <asm/local.h>
#include <asm/mce.h>
+#include <asm/intel_arch_perfmon.h>
/*
* lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
@@ -74,6 +66,9 @@ static unsigned int nmi_p4_cccr_val;
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING 0x76
#define K7_NMI_EVENT K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
+#define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
+#define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
+
#define MSR_P4_MISC_ENABLE 0x1A0
#define MSR_P4_MISC_ENABLE_PERF_AVAIL (1<<7)
#define MSR_P4_MISC_ENABLE_PEBS_UNAVAIL (1<<12)
@@ -105,7 +100,10 @@ static __cpuinit inline int nmi_known_cpu(void)
case X86_VENDOR_AMD:
return boot_cpu_data.x86 == 15;
case X86_VENDOR_INTEL:
- return boot_cpu_data.x86 == 15;
+ if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
+ return 1;
+ else
+ return (boot_cpu_data.x86 == 15);
}
return 0;
}
@@ -211,6 +209,8 @@ int __init setup_nmi_watchdog(char *str)
__setup("nmi_watchdog=", setup_nmi_watchdog);
+static void disable_intel_arch_watchdog(void);
+
static void disable_lapic_nmi_watchdog(void)
{
if (nmi_active <= 0)
@@ -223,6 +223,8 @@ static void disable_lapic_nmi_watchdog(void)
if (boot_cpu_data.x86 == 15) {
wrmsr(MSR_P4_IQ_CCCR0, 0, 0);
wrmsr(MSR_P4_CRU_ESCR0, 0, 0);
+ } else if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
+ disable_intel_arch_watchdog();
}
break;
}
@@ -375,6 +377,53 @@ static void setup_k7_watchdog(void)
wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
}
+static void disable_intel_arch_watchdog(void)
+{
+ unsigned ebx;
+
+ /*
+ * Check whether the Architectural PerfMon supports
+ * Unhalted Core Cycles Event or not.
+ * NOTE: Corresponding bit = 0 in ebp indicates event present.
+ */
+ ebx = cpuid_ebx(10);
+ if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
+ wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, 0, 0);
+}
+
+static int setup_intel_arch_watchdog(void)
+{
+ unsigned int evntsel;
+ unsigned ebx;
+
+ /*
+ * Check whether the Architectural PerfMon supports
+ * Unhalted Core Cycles Event or not.
+ * NOTE: Corresponding bit = 0 in ebp indicates event present.
+ */
+ ebx = cpuid_ebx(10);
+ if ((ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
+ return 0;
+
+ nmi_perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
+
+ clear_msr_range(MSR_ARCH_PERFMON_EVENTSEL0, 2);
+ clear_msr_range(MSR_ARCH_PERFMON_PERFCTR0, 2);
+
+ evntsel = ARCH_PERFMON_EVENTSEL_INT
+ | ARCH_PERFMON_EVENTSEL_OS
+ | ARCH_PERFMON_EVENTSEL_USR
+ | ARCH_PERFMON_NMI_EVENT_SEL
+ | ARCH_PERFMON_NMI_EVENT_UMASK;
+
+ wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
+ wrmsrl(MSR_ARCH_PERFMON_PERFCTR0, -((u64)cpu_khz * 1000 / nmi_hz));
+ apic_write(APIC_LVTPC, APIC_DM_NMI);
+ evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+ wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
+ return 1;
+}
+
static int setup_p4_watchdog(void)
{
@@ -428,10 +477,16 @@ void setup_apic_nmi_watchdog(void)
setup_k7_watchdog();
break;
case X86_VENDOR_INTEL:
- if (boot_cpu_data.x86 != 15)
- return;
- if (!setup_p4_watchdog())
+ if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
+ if (!setup_intel_arch_watchdog())
+ return;
+ } else if (boot_cpu_data.x86 == 15) {
+ if (!setup_p4_watchdog())
+ return;
+ } else {
return;
+ }
+
break;
default:
@@ -516,7 +571,14 @@ void __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
*/
wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
apic_write(APIC_LVTPC, APIC_DM_NMI);
- }
+ } else if (nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
+ /*
+ * For Intel based architectural perfmon
+ * - LVTPC is masked on interrupt and must be
+ * unmasked by the LVTPC handler.
+ */
+ apic_write(APIC_LVTPC, APIC_DM_NMI);
+ }
wrmsrl(nmi_perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
}
}
@@ -544,11 +606,13 @@ void set_nmi_callback(nmi_callback_t callback)
vmalloc_sync_all();
rcu_assign_pointer(nmi_callback, callback);
}
+EXPORT_SYMBOL_GPL(set_nmi_callback);
void unset_nmi_callback(void)
{
nmi_callback = dummy_nmi_callback;
}
+EXPORT_SYMBOL_GPL(unset_nmi_callback);
#ifdef CONFIG_SYSCTL
diff --git a/arch/x86_64/kernel/pci-calgary.c b/arch/x86_64/kernel/pci-calgary.c
new file mode 100644
index 00000000000..d91cb843f54
--- /dev/null
+++ b/arch/x86_64/kernel/pci-calgary.c
@@ -0,0 +1,1018 @@
+/*
+ * Derived from arch/powerpc/kernel/iommu.c
+ *
+ * Copyright (C) 2006 Jon Mason <jdmason@us.ibm.com>, IBM Corporation
+ * Copyright (C) 2006 Muli Ben-Yehuda <muli@il.ibm.com>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/pci_ids.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <asm/proto.h>
+#include <asm/calgary.h>
+#include <asm/tce.h>
+#include <asm/pci-direct.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+
+#define PCI_DEVICE_ID_IBM_CALGARY 0x02a1
+#define PCI_VENDOR_DEVICE_ID_CALGARY \
+ (PCI_VENDOR_ID_IBM | PCI_DEVICE_ID_IBM_CALGARY << 16)
+
+/* we need these for register space address calculation */
+#define START_ADDRESS 0xfe000000
+#define CHASSIS_BASE 0
+#define ONE_BASED_CHASSIS_NUM 1
+
+/* register offsets inside the host bridge space */
+#define PHB_CSR_OFFSET 0x0110
+#define PHB_PLSSR_OFFSET 0x0120
+#define PHB_CONFIG_RW_OFFSET 0x0160
+#define PHB_IOBASE_BAR_LOW 0x0170
+#define PHB_IOBASE_BAR_HIGH 0x0180
+#define PHB_MEM_1_LOW 0x0190
+#define PHB_MEM_1_HIGH 0x01A0
+#define PHB_IO_ADDR_SIZE 0x01B0
+#define PHB_MEM_1_SIZE 0x01C0
+#define PHB_MEM_ST_OFFSET 0x01D0
+#define PHB_AER_OFFSET 0x0200
+#define PHB_CONFIG_0_HIGH 0x0220
+#define PHB_CONFIG_0_LOW 0x0230
+#define PHB_CONFIG_0_END 0x0240
+#define PHB_MEM_2_LOW 0x02B0
+#define PHB_MEM_2_HIGH 0x02C0
+#define PHB_MEM_2_SIZE_HIGH 0x02D0
+#define PHB_MEM_2_SIZE_LOW 0x02E0
+#define PHB_DOSHOLE_OFFSET 0x08E0
+
+/* PHB_CONFIG_RW */
+#define PHB_TCE_ENABLE 0x20000000
+#define PHB_SLOT_DISABLE 0x1C000000
+#define PHB_DAC_DISABLE 0x01000000
+#define PHB_MEM2_ENABLE 0x00400000
+#define PHB_MCSR_ENABLE 0x00100000
+/* TAR (Table Address Register) */
+#define TAR_SW_BITS 0x0000ffffffff800fUL
+#define TAR_VALID 0x0000000000000008UL
+/* CSR (Channel/DMA Status Register) */
+#define CSR_AGENT_MASK 0xffe0ffff
+
+#define MAX_NUM_OF_PHBS 8 /* how many PHBs in total? */
+#define MAX_PHB_BUS_NUM (MAX_NUM_OF_PHBS * 2) /* max dev->bus->number */
+#define PHBS_PER_CALGARY 4
+
+/* register offsets in Calgary's internal register space */
+static const unsigned long tar_offsets[] = {
+ 0x0580 /* TAR0 */,
+ 0x0588 /* TAR1 */,
+ 0x0590 /* TAR2 */,
+ 0x0598 /* TAR3 */
+};
+
+static const unsigned long split_queue_offsets[] = {
+ 0x4870 /* SPLIT QUEUE 0 */,
+ 0x5870 /* SPLIT QUEUE 1 */,
+ 0x6870 /* SPLIT QUEUE 2 */,
+ 0x7870 /* SPLIT QUEUE 3 */
+};
+
+static const unsigned long phb_offsets[] = {
+ 0x8000 /* PHB0 */,
+ 0x9000 /* PHB1 */,
+ 0xA000 /* PHB2 */,
+ 0xB000 /* PHB3 */
+};
+
+void* tce_table_kva[MAX_NUM_OF_PHBS * MAX_NUMNODES];
+unsigned int specified_table_size = TCE_TABLE_SIZE_UNSPECIFIED;
+static int translate_empty_slots __read_mostly = 0;
+static int calgary_detected __read_mostly = 0;
+
+/*
+ * the bitmap of PHBs the user requested that we disable
+ * translation on.
+ */
+static DECLARE_BITMAP(translation_disabled, MAX_NUMNODES * MAX_PHB_BUS_NUM);
+
+static void tce_cache_blast(struct iommu_table *tbl);
+
+/* enable this to stress test the chip's TCE cache */
+#ifdef CONFIG_IOMMU_DEBUG
+static inline void tce_cache_blast_stress(struct iommu_table *tbl)
+{
+ tce_cache_blast(tbl);
+}
+#else
+static inline void tce_cache_blast_stress(struct iommu_table *tbl)
+{
+}
+#endif /* BLAST_TCE_CACHE_ON_UNMAP */
+
+static inline unsigned int num_dma_pages(unsigned long dma, unsigned int dmalen)
+{
+ unsigned int npages;
+
+ npages = PAGE_ALIGN(dma + dmalen) - (dma & PAGE_MASK);
+ npages >>= PAGE_SHIFT;
+
+ return npages;
+}
+
+static inline int translate_phb(struct pci_dev* dev)
+{
+ int disabled = test_bit(dev->bus->number, translation_disabled);
+ return !disabled;
+}
+
+static void iommu_range_reserve(struct iommu_table *tbl,
+ unsigned long start_addr, unsigned int npages)
+{
+ unsigned long index;
+ unsigned long end;
+
+ index = start_addr >> PAGE_SHIFT;
+
+ /* bail out if we're asked to reserve a region we don't cover */
+ if (index >= tbl->it_size)
+ return;
+
+ end = index + npages;
+ if (end > tbl->it_size) /* don't go off the table */
+ end = tbl->it_size;
+
+ while (index < end) {
+ if (test_bit(index, tbl->it_map))
+ printk(KERN_ERR "Calgary: entry already allocated at "
+ "0x%lx tbl %p dma 0x%lx npages %u\n",
+ index, tbl, start_addr, npages);
+ ++index;
+ }
+ set_bit_string(tbl->it_map, start_addr >> PAGE_SHIFT, npages);
+}
+
+static unsigned long iommu_range_alloc(struct iommu_table *tbl,
+ unsigned int npages)
+{
+ unsigned long offset;
+
+ BUG_ON(npages == 0);
+
+ offset = find_next_zero_string(tbl->it_map, tbl->it_hint,
+ tbl->it_size, npages);
+ if (offset == ~0UL) {
+ tce_cache_blast(tbl);
+ offset = find_next_zero_string(tbl->it_map, 0,
+ tbl->it_size, npages);
+ if (offset == ~0UL) {
+ printk(KERN_WARNING "Calgary: IOMMU full.\n");
+ if (panic_on_overflow)
+ panic("Calgary: fix the allocator.\n");
+ else
+ return bad_dma_address;
+ }
+ }
+
+ set_bit_string(tbl->it_map, offset, npages);
+ tbl->it_hint = offset + npages;
+ BUG_ON(tbl->it_hint > tbl->it_size);
+
+ return offset;
+}
+
+static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *vaddr,
+ unsigned int npages, int direction)
+{
+ unsigned long entry, flags;
+ dma_addr_t ret = bad_dma_address;
+
+ spin_lock_irqsave(&tbl->it_lock, flags);
+
+ entry = iommu_range_alloc(tbl, npages);
+
+ if (unlikely(entry == bad_dma_address))
+ goto error;
+
+ /* set the return dma address */
+ ret = (entry << PAGE_SHIFT) | ((unsigned long)vaddr & ~PAGE_MASK);
+
+ /* put the TCEs in the HW table */
+ tce_build(tbl, entry, npages, (unsigned long)vaddr & PAGE_MASK,
+ direction);
+
+ spin_unlock_irqrestore(&tbl->it_lock, flags);
+
+ return ret;
+
+error:
+ spin_unlock_irqrestore(&tbl->it_lock, flags);
+ printk(KERN_WARNING "Calgary: failed to allocate %u pages in "
+ "iommu %p\n", npages, tbl);
+ return bad_dma_address;
+}
+
+static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
+ unsigned int npages)
+{
+ unsigned long entry;
+ unsigned long i;
+
+ entry = dma_addr >> PAGE_SHIFT;
+
+ BUG_ON(entry + npages > tbl->it_size);
+
+ tce_free(tbl, entry, npages);
+
+ for (i = 0; i < npages; ++i) {
+ if (!test_bit(entry + i, tbl->it_map))
+ printk(KERN_ERR "Calgary: bit is off at 0x%lx "
+ "tbl %p dma 0x%Lx entry 0x%lx npages %u\n",
+ entry + i, tbl, dma_addr, entry, npages);
+ }
+
+ __clear_bit_string(tbl->it_map, entry, npages);
+
+ tce_cache_blast_stress(tbl);
+}
+
+static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
+ unsigned int npages)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&tbl->it_lock, flags);
+
+ __iommu_free(tbl, dma_addr, npages);
+
+ spin_unlock_irqrestore(&tbl->it_lock, flags);
+}
+
+static void __calgary_unmap_sg(struct iommu_table *tbl,
+ struct scatterlist *sglist, int nelems, int direction)
+{
+ while (nelems--) {
+ unsigned int npages;
+ dma_addr_t dma = sglist->dma_address;
+ unsigned int dmalen = sglist->dma_length;
+
+ if (dmalen == 0)
+ break;
+
+ npages = num_dma_pages(dma, dmalen);
+ __iommu_free(tbl, dma, npages);
+ sglist++;
+ }
+}
+
+void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist,
+ int nelems, int direction)
+{
+ unsigned long flags;
+ struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata;
+
+ if (!translate_phb(to_pci_dev(dev)))
+ return;
+
+ spin_lock_irqsave(&tbl->it_lock, flags);
+
+ __calgary_unmap_sg(tbl, sglist, nelems, direction);
+
+ spin_unlock_irqrestore(&tbl->it_lock, flags);
+}
+
+static int calgary_nontranslate_map_sg(struct device* dev,
+ struct scatterlist *sg, int nelems, int direction)
+{
+ int i;
+
+ for (i = 0; i < nelems; i++ ) {
+ struct scatterlist *s = &sg[i];
+ BUG_ON(!s->page);
+ s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
+ s->dma_length = s->length;
+ }
+ return nelems;
+}
+
+int calgary_map_sg(struct device *dev, struct scatterlist *sg,
+ int nelems, int direction)
+{
+ struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata;
+ unsigned long flags;
+ unsigned long vaddr;
+ unsigned int npages;
+ unsigned long entry;
+ int i;
+
+ if (!translate_phb(to_pci_dev(dev)))
+ return calgary_nontranslate_map_sg(dev, sg, nelems, direction);
+
+ spin_lock_irqsave(&tbl->it_lock, flags);
+
+ for (i = 0; i < nelems; i++ ) {
+ struct scatterlist *s = &sg[i];
+ BUG_ON(!s->page);
+
+ vaddr = (unsigned long)page_address(s->page) + s->offset;
+ npages = num_dma_pages(vaddr, s->length);
+
+ entry = iommu_range_alloc(tbl, npages);
+ if (entry == bad_dma_address) {
+ /* makes sure unmap knows to stop */
+ s->dma_length = 0;
+ goto error;
+ }
+
+ s->dma_address = (entry << PAGE_SHIFT) | s->offset;
+
+ /* insert into HW table */
+ tce_build(tbl, entry, npages, vaddr & PAGE_MASK,
+ direction);
+
+ s->dma_length = s->length;
+ }
+
+ spin_unlock_irqrestore(&tbl->it_lock, flags);
+
+ return nelems;
+error:
+ __calgary_unmap_sg(tbl, sg, nelems, direction);
+ for (i = 0; i < nelems; i++) {
+ sg[i].dma_address = bad_dma_address;
+ sg[i].dma_length = 0;
+ }
+ spin_unlock_irqrestore(&tbl->it_lock, flags);
+ return 0;
+}
+
+dma_addr_t calgary_map_single(struct device *dev, void *vaddr,
+ size_t size, int direction)
+{
+ dma_addr_t dma_handle = bad_dma_address;
+ unsigned long uaddr;
+ unsigned int npages;
+ struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata;
+
+ uaddr = (unsigned long)vaddr;
+ npages = num_dma_pages(uaddr, size);
+
+ if (translate_phb(to_pci_dev(dev)))
+ dma_handle = iommu_alloc(tbl, vaddr, npages, direction);
+ else
+ dma_handle = virt_to_bus(vaddr);
+
+ return dma_handle;
+}
+
+void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle,
+ size_t size, int direction)
+{
+ struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata;
+ unsigned int npages;
+
+ if (!translate_phb(to_pci_dev(dev)))
+ return;
+
+ npages = num_dma_pages(dma_handle, size);
+ iommu_free(tbl, dma_handle, npages);
+}
+
+void* calgary_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ void *ret = NULL;
+ dma_addr_t mapping;
+ unsigned int npages, order;
+ struct iommu_table *tbl;
+
+ tbl = to_pci_dev(dev)->bus->self->sysdata;
+
+ size = PAGE_ALIGN(size); /* size rounded up to full pages */
+ npages = size >> PAGE_SHIFT;
+ order = get_order(size);
+
+ /* alloc enough pages (and possibly more) */
+ ret = (void *)__get_free_pages(flag, order);
+ if (!ret)
+ goto error;
+ memset(ret, 0, size);
+
+ if (translate_phb(to_pci_dev(dev))) {
+ /* set up tces to cover the allocated range */
+ mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL);
+ if (mapping == bad_dma_address)
+ goto free;
+
+ *dma_handle = mapping;
+ } else /* non translated slot */
+ *dma_handle = virt_to_bus(ret);
+
+ return ret;
+
+free:
+ free_pages((unsigned long)ret, get_order(size));
+ ret = NULL;
+error:
+ return ret;
+}
+
+static struct dma_mapping_ops calgary_dma_ops = {
+ .alloc_coherent = calgary_alloc_coherent,
+ .map_single = calgary_map_single,
+ .unmap_single = calgary_unmap_single,
+ .map_sg = calgary_map_sg,
+ .unmap_sg = calgary_unmap_sg,
+};
+
+static inline int busno_to_phbid(unsigned char num)
+{
+ return bus_to_phb(num) % PHBS_PER_CALGARY;
+}
+
+static inline unsigned long split_queue_offset(unsigned char num)
+{
+ size_t idx = busno_to_phbid(num);
+
+ return split_queue_offsets[idx];
+}
+
+static inline unsigned long tar_offset(unsigned char num)
+{
+ size_t idx = busno_to_phbid(num);
+
+ return tar_offsets[idx];
+}
+
+static inline unsigned long phb_offset(unsigned char num)
+{
+ size_t idx = busno_to_phbid(num);
+
+ return phb_offsets[idx];
+}
+
+static inline void __iomem* calgary_reg(void __iomem *bar, unsigned long offset)
+{
+ unsigned long target = ((unsigned long)bar) | offset;
+ return (void __iomem*)target;
+}
+
+static void tce_cache_blast(struct iommu_table *tbl)
+{
+ u64 val;
+ u32 aer;
+ int i = 0;
+ void __iomem *bbar = tbl->bbar;
+ void __iomem *target;
+
+ /* disable arbitration on the bus */
+ target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_AER_OFFSET);
+ aer = readl(target);
+ writel(0, target);
+
+ /* read plssr to ensure it got there */
+ target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_PLSSR_OFFSET);
+ val = readl(target);
+
+ /* poll split queues until all DMA activity is done */
+ target = calgary_reg(bbar, split_queue_offset(tbl->it_busno));
+ do {
+ val = readq(target);
+ i++;
+ } while ((val & 0xff) != 0xff && i < 100);
+ if (i == 100)
+ printk(KERN_WARNING "Calgary: PCI bus not quiesced, "
+ "continuing anyway\n");
+
+ /* invalidate TCE cache */
+ target = calgary_reg(bbar, tar_offset(tbl->it_busno));
+ writeq(tbl->tar_val, target);
+
+ /* enable arbitration */
+ target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_AER_OFFSET);
+ writel(aer, target);
+ (void)readl(target); /* flush */
+}
+
+static void __init calgary_reserve_mem_region(struct pci_dev *dev, u64 start,
+ u64 limit)
+{
+ unsigned int numpages;
+
+ limit = limit | 0xfffff;
+ limit++;
+
+ numpages = ((limit - start) >> PAGE_SHIFT);
+ iommu_range_reserve(dev->sysdata, start, numpages);
+}
+
+static void __init calgary_reserve_peripheral_mem_1(struct pci_dev *dev)
+{
+ void __iomem *target;
+ u64 low, high, sizelow;
+ u64 start, limit;
+ struct iommu_table *tbl = dev->sysdata;
+ unsigned char busnum = dev->bus->number;
+ void __iomem *bbar = tbl->bbar;
+
+ /* peripheral MEM_1 region */
+ target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_1_LOW);
+ low = be32_to_cpu(readl(target));
+ target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_1_HIGH);
+ high = be32_to_cpu(readl(target));
+ target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_1_SIZE);
+ sizelow = be32_to_cpu(readl(target));
+
+ start = (high << 32) | low;
+ limit = sizelow;
+
+ calgary_reserve_mem_region(dev, start, limit);
+}
+
+static void __init calgary_reserve_peripheral_mem_2(struct pci_dev *dev)
+{
+ void __iomem *target;
+ u32 val32;
+ u64 low, high, sizelow, sizehigh;
+ u64 start, limit;
+ struct iommu_table *tbl = dev->sysdata;
+ unsigned char busnum = dev->bus->number;
+ void __iomem *bbar = tbl->bbar;
+
+ /* is it enabled? */
+ target = calgary_reg(bbar, phb_offset(busnum) | PHB_CONFIG_RW_OFFSET);
+ val32 = be32_to_cpu(readl(target));
+ if (!(val32 & PHB_MEM2_ENABLE))
+ return;
+
+ target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_LOW);
+ low = be32_to_cpu(readl(target));
+ target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_HIGH);
+ high = be32_to_cpu(readl(target));
+ target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_SIZE_LOW);
+ sizelow = be32_to_cpu(readl(target));
+ target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_SIZE_HIGH);
+ sizehigh = be32_to_cpu(readl(target));
+
+ start = (high << 32) | low;
+ limit = (sizehigh << 32) | sizelow;
+
+ calgary_reserve_mem_region(dev, start, limit);
+}
+
+/*
+ * some regions of the IO address space do not get translated, so we
+ * must not give devices IO addresses in those regions. The regions
+ * are the 640KB-1MB region and the two PCI peripheral memory holes.
+ * Reserve all of them in the IOMMU bitmap to avoid giving them out
+ * later.
+ */
+static void __init calgary_reserve_regions(struct pci_dev *dev)
+{
+ unsigned int npages;
+ void __iomem *bbar;
+ unsigned char busnum;
+ u64 start;
+ struct iommu_table *tbl = dev->sysdata;
+
+ bbar = tbl->bbar;
+ busnum = dev->bus->number;
+
+ /* reserve bad_dma_address in case it's a legal address */
+ iommu_range_reserve(tbl, bad_dma_address, 1);
+
+ /* avoid the BIOS/VGA first 640KB-1MB region */
+ start = (640 * 1024);
+ npages = ((1024 - 640) * 1024) >> PAGE_SHIFT;
+ iommu_range_reserve(tbl, start, npages);
+
+ /* reserve the two PCI peripheral memory regions in IO space */
+ calgary_reserve_peripheral_mem_1(dev);
+ calgary_reserve_peripheral_mem_2(dev);
+}
+
+static int __init calgary_setup_tar(struct pci_dev *dev, void __iomem *bbar)
+{
+ u64 val64;
+ u64 table_phys;
+ void __iomem *target;
+ int ret;
+ struct iommu_table *tbl;
+
+ /* build TCE tables for each PHB */
+ ret = build_tce_table(dev, bbar);
+ if (ret)
+ return ret;
+
+ calgary_reserve_regions(dev);
+
+ /* set TARs for each PHB */
+ target = calgary_reg(bbar, tar_offset(dev->bus->number));
+ val64 = be64_to_cpu(readq(target));
+
+ /* zero out all TAR bits under sw control */
+ val64 &= ~TAR_SW_BITS;
+
+ tbl = dev->sysdata;
+ table_phys = (u64)__pa(tbl->it_base);
+ val64 |= table_phys;
+
+ BUG_ON(specified_table_size > TCE_TABLE_SIZE_8M);
+ val64 |= (u64) specified_table_size;
+
+ tbl->tar_val = cpu_to_be64(val64);
+ writeq(tbl->tar_val, target);
+ readq(target); /* flush */
+
+ return 0;
+}
+
+static void __init calgary_free_tar(struct pci_dev *dev)
+{
+ u64 val64;
+ struct iommu_table *tbl = dev->sysdata;
+ void __iomem *target;
+
+ target = calgary_reg(tbl->bbar, tar_offset(dev->bus->number));
+ val64 = be64_to_cpu(readq(target));
+ val64 &= ~TAR_SW_BITS;
+ writeq(cpu_to_be64(val64), target);
+ readq(target); /* flush */
+
+ kfree(tbl);
+ dev->sysdata = NULL;
+}
+
+static void calgary_watchdog(unsigned long data)
+{
+ struct pci_dev *dev = (struct pci_dev *)data;
+ struct iommu_table *tbl = dev->sysdata;
+ void __iomem *bbar = tbl->bbar;
+ u32 val32;
+ void __iomem *target;
+
+ target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_CSR_OFFSET);
+ val32 = be32_to_cpu(readl(target));
+
+ /* If no error, the agent ID in the CSR is not valid */
+ if (val32 & CSR_AGENT_MASK) {
+ printk(KERN_EMERG "calgary_watchdog: DMA error on bus %d, "
+ "CSR = %#x\n", dev->bus->number, val32);
+ writel(0, target);
+
+ /* Disable bus that caused the error */
+ target = calgary_reg(bbar, phb_offset(tbl->it_busno) |
+ PHB_CONFIG_RW_OFFSET);
+ val32 = be32_to_cpu(readl(target));
+ val32 |= PHB_SLOT_DISABLE;
+ writel(cpu_to_be32(val32), target);
+ readl(target); /* flush */
+ } else {
+ /* Reset the timer */
+ mod_timer(&tbl->watchdog_timer, jiffies + 2 * HZ);
+ }
+}
+
+static void __init calgary_enable_translation(struct pci_dev *dev)
+{
+ u32 val32;
+ unsigned char busnum;
+ void __iomem *target;
+ void __iomem *bbar;
+ struct iommu_table *tbl;
+
+ busnum = dev->bus->number;
+ tbl = dev->sysdata;
+ bbar = tbl->bbar;
+
+ /* enable TCE in PHB Config Register */
+ target = calgary_reg(bbar, phb_offset(busnum) | PHB_CONFIG_RW_OFFSET);
+ val32 = be32_to_cpu(readl(target));
+ val32 |= PHB_TCE_ENABLE | PHB_DAC_DISABLE | PHB_MCSR_ENABLE;
+
+ printk(KERN_INFO "Calgary: enabling translation on PHB %d\n", busnum);
+ printk(KERN_INFO "Calgary: errant DMAs will now be prevented on this "
+ "bus.\n");
+
+ writel(cpu_to_be32(val32), target);
+ readl(target); /* flush */
+
+ init_timer(&tbl->watchdog_timer);
+ tbl->watchdog_timer.function = &calgary_watchdog;
+ tbl->watchdog_timer.data = (unsigned long)dev;
+ mod_timer(&tbl->watchdog_timer, jiffies);
+}
+
+static void __init calgary_disable_translation(struct pci_dev *dev)
+{
+ u32 val32;
+ unsigned char busnum;
+ void __iomem *target;
+ void __iomem *bbar;
+ struct iommu_table *tbl;
+
+ busnum = dev->bus->number;
+ tbl = dev->sysdata;
+ bbar = tbl->bbar;
+
+ /* disable TCE in PHB Config Register */
+ target = calgary_reg(bbar, phb_offset(busnum) | PHB_CONFIG_RW_OFFSET);
+ val32 = be32_to_cpu(readl(target));
+ val32 &= ~(PHB_TCE_ENABLE | PHB_DAC_DISABLE | PHB_MCSR_ENABLE);
+
+ printk(KERN_INFO "Calgary: disabling translation on PHB %d!\n", busnum);
+ writel(cpu_to_be32(val32), target);
+ readl(target); /* flush */
+
+ del_timer_sync(&tbl->watchdog_timer);
+}
+
+static inline unsigned int __init locate_register_space(struct pci_dev *dev)
+{
+ int rionodeid;
+ u32 address;
+
+ rionodeid = (dev->bus->number % 15 > 4) ? 3 : 2;
+ /*
+ * register space address calculation as follows:
+ * FE0MB-8MB*OneBasedChassisNumber+1MB*(RioNodeId-ChassisBase)
+ * ChassisBase is always zero for x366/x260/x460
+ * RioNodeId is 2 for first Calgary, 3 for second Calgary
+ */
+ address = START_ADDRESS -
+ (0x800000 * (ONE_BASED_CHASSIS_NUM + dev->bus->number / 15)) +
+ (0x100000) * (rionodeid - CHASSIS_BASE);
+ return address;
+}
+
+static int __init calgary_init_one_nontraslated(struct pci_dev *dev)
+{
+ dev->sysdata = NULL;
+ dev->bus->self = dev;
+
+ return 0;
+}
+
+static int __init calgary_init_one(struct pci_dev *dev)
+{
+ u32 address;
+ void __iomem *bbar;
+ int ret;
+
+ address = locate_register_space(dev);
+ /* map entire 1MB of Calgary config space */
+ bbar = ioremap_nocache(address, 1024 * 1024);
+ if (!bbar) {
+ ret = -ENODATA;
+ goto done;
+ }
+
+ ret = calgary_setup_tar(dev, bbar);
+ if (ret)
+ goto iounmap;
+
+ dev->bus->self = dev;
+ calgary_enable_translation(dev);
+
+ return 0;
+
+iounmap:
+ iounmap(bbar);
+done:
+ return ret;
+}
+
+static int __init calgary_init(void)
+{
+ int i, ret = -ENODEV;
+ struct pci_dev *dev = NULL;
+
+ for (i = 0; i <= num_online_nodes() * MAX_NUM_OF_PHBS; i++) {
+ dev = pci_get_device(PCI_VENDOR_ID_IBM,
+ PCI_DEVICE_ID_IBM_CALGARY,
+ dev);
+ if (!dev)
+ break;
+ if (!translate_phb(dev)) {
+ calgary_init_one_nontraslated(dev);
+ continue;
+ }
+ if (!tce_table_kva[i] && !translate_empty_slots) {
+ pci_dev_put(dev);
+ continue;
+ }
+ ret = calgary_init_one(dev);
+ if (ret)
+ goto error;
+ }
+
+ return ret;
+
+error:
+ for (i--; i >= 0; i--) {
+ dev = pci_find_device_reverse(PCI_VENDOR_ID_IBM,
+ PCI_DEVICE_ID_IBM_CALGARY,
+ dev);
+ if (!translate_phb(dev)) {
+ pci_dev_put(dev);
+ continue;
+ }
+ if (!tce_table_kva[i] && !translate_empty_slots)
+ continue;
+ calgary_disable_translation(dev);
+ calgary_free_tar(dev);
+ pci_dev_put(dev);
+ }
+
+ return ret;
+}
+
+static inline int __init determine_tce_table_size(u64 ram)
+{
+ int ret;
+
+ if (specified_table_size != TCE_TABLE_SIZE_UNSPECIFIED)
+ return specified_table_size;
+
+ /*
+ * Table sizes are from 0 to 7 (TCE_TABLE_SIZE_64K to
+ * TCE_TABLE_SIZE_8M). Table size 0 has 8K entries and each
+ * larger table size has twice as many entries, so shift the
+ * max ram address by 13 to divide by 8K and then look at the
+ * order of the result to choose between 0-7.
+ */
+ ret = get_order(ram >> 13);
+ if (ret > TCE_TABLE_SIZE_8M)
+ ret = TCE_TABLE_SIZE_8M;
+
+ return ret;
+}
+
+void __init detect_calgary(void)
+{
+ u32 val;
+ int bus, table_idx;
+ void *tbl;
+ int detected = 0;
+
+ /*
+ * if the user specified iommu=off or iommu=soft or we found
+ * another HW IOMMU already, bail out.
+ */
+ if (swiotlb || no_iommu || iommu_detected)
+ return;
+
+ specified_table_size = determine_tce_table_size(end_pfn * PAGE_SIZE);
+
+ for (bus = 0, table_idx = 0;
+ bus <= num_online_nodes() * MAX_PHB_BUS_NUM;
+ bus++) {
+ BUG_ON(bus > MAX_NUMNODES * MAX_PHB_BUS_NUM);
+ if (read_pci_config(bus, 0, 0, 0) != PCI_VENDOR_DEVICE_ID_CALGARY)
+ continue;
+ if (test_bit(bus, translation_disabled)) {
+ printk(KERN_INFO "Calgary: translation is disabled for "
+ "PHB 0x%x\n", bus);
+ /* skip this phb, don't allocate a tbl for it */
+ tce_table_kva[table_idx] = NULL;
+ table_idx++;
+ continue;
+ }
+ /*
+ * scan the first slot of the PCI bus to see if there
+ * are any devices present
+ */
+ val = read_pci_config(bus, 1, 0, 0);
+ if (val != 0xffffffff || translate_empty_slots) {
+ tbl = alloc_tce_table();
+ if (!tbl)
+ goto cleanup;
+ detected = 1;
+ } else
+ tbl = NULL;
+
+ tce_table_kva[table_idx] = tbl;
+ table_idx++;
+ }
+
+ if (detected) {
+ iommu_detected = 1;
+ calgary_detected = 1;
+ printk(KERN_INFO "PCI-DMA: Calgary IOMMU detected. "
+ "TCE table spec is %d.\n", specified_table_size);
+ }
+ return;
+
+cleanup:
+ for (--table_idx; table_idx >= 0; --table_idx)
+ if (tce_table_kva[table_idx])
+ free_tce_table(tce_table_kva[table_idx]);
+}
+
+int __init calgary_iommu_init(void)
+{
+ int ret;
+
+ if (no_iommu || swiotlb)
+ return -ENODEV;
+
+ if (!calgary_detected)
+ return -ENODEV;
+
+ /* ok, we're trying to use Calgary - let's roll */
+ printk(KERN_INFO "PCI-DMA: Using Calgary IOMMU\n");
+
+ ret = calgary_init();
+ if (ret) {
+ printk(KERN_ERR "PCI-DMA: Calgary init failed %d, "
+ "falling back to no_iommu\n", ret);
+ if (end_pfn > MAX_DMA32_PFN)
+ printk(KERN_ERR "WARNING more than 4GB of memory, "
+ "32bit PCI may malfunction.\n");
+ return ret;
+ }
+
+ force_iommu = 1;
+ dma_ops = &calgary_dma_ops;
+
+ return 0;
+}
+
+static int __init calgary_parse_options(char *p)
+{
+ unsigned int bridge;
+ size_t len;
+ char* endp;
+
+ while (*p) {
+ if (!strncmp(p, "64k", 3))
+ specified_table_size = TCE_TABLE_SIZE_64K;
+ else if (!strncmp(p, "128k", 4))
+ specified_table_size = TCE_TABLE_SIZE_128K;
+ else if (!strncmp(p, "256k", 4))
+ specified_table_size = TCE_TABLE_SIZE_256K;
+ else if (!strncmp(p, "512k", 4))
+ specified_table_size = TCE_TABLE_SIZE_512K;
+ else if (!strncmp(p, "1M", 2))
+ specified_table_size = TCE_TABLE_SIZE_1M;
+ else if (!strncmp(p, "2M", 2))
+ specified_table_size = TCE_TABLE_SIZE_2M;
+ else if (!strncmp(p, "4M", 2))
+ specified_table_size = TCE_TABLE_SIZE_4M;
+ else if (!strncmp(p, "8M", 2))
+ specified_table_size = TCE_TABLE_SIZE_8M;
+
+ len = strlen("translate_empty_slots");
+ if (!strncmp(p, "translate_empty_slots", len))
+ translate_empty_slots = 1;
+
+ len = strlen("disable");
+ if (!strncmp(p, "disable", len)) {
+ p += len;
+ if (*p == '=')
+ ++p;
+ if (*p == '\0')
+ break;
+ bridge = simple_strtol(p, &endp, 0);
+ if (p == endp)
+ break;
+
+ if (bridge <= (num_online_nodes() * MAX_PHB_BUS_NUM)) {
+ printk(KERN_INFO "Calgary: disabling "
+ "translation for PHB 0x%x\n", bridge);
+ set_bit(bridge, translation_disabled);
+ }
+ }
+
+ p = strpbrk(p, ",");
+ if (!p)
+ break;
+
+ p++; /* skip ',' */
+ }
+ return 1;
+}
+__setup("calgary=", calgary_parse_options);
diff --git a/arch/x86_64/kernel/pci-dma.c b/arch/x86_64/kernel/pci-dma.c
index a9275c9557c..9c44f4f2433 100644
--- a/arch/x86_64/kernel/pci-dma.c
+++ b/arch/x86_64/kernel/pci-dma.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <asm/io.h>
#include <asm/proto.h>
+#include <asm/calgary.h>
int iommu_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_merge);
@@ -33,12 +34,15 @@ int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly= 0;
#endif
+/* Set this to 1 if there is a HW IOMMU in the system */
+int iommu_detected __read_mostly = 0;
+
/* Dummy device used for NULL arguments (normally ISA). Better would
be probably a smaller DMA mask, but this is bug-to-bug compatible
to i386. */
struct device fallback_dev = {
.bus_id = "fallback device",
- .coherent_dma_mask = 0xffffffff,
+ .coherent_dma_mask = DMA_32BIT_MASK,
.dma_mask = &fallback_dev.coherent_dma_mask,
};
@@ -77,7 +81,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
dev = &fallback_dev;
dma_mask = dev->coherent_dma_mask;
if (dma_mask == 0)
- dma_mask = 0xffffffff;
+ dma_mask = DMA_32BIT_MASK;
/* Don't invoke OOM killer */
gfp |= __GFP_NORETRY;
@@ -90,7 +94,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
larger than 16MB and in this case we have a chance of
finding fitting memory in the next higher zone first. If
not retry with true GFP_DMA. -AK */
- if (dma_mask <= 0xffffffff)
+ if (dma_mask <= DMA_32BIT_MASK)
gfp |= GFP_DMA32;
again:
@@ -111,7 +115,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
/* Don't use the 16MB ZONE_DMA unless absolutely
needed. It's better to use remapping first. */
- if (dma_mask < 0xffffffff && !(gfp & GFP_DMA)) {
+ if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
goto again;
}
@@ -174,7 +178,7 @@ int dma_supported(struct device *dev, u64 mask)
/* Copied from i386. Doesn't make much sense, because it will
only work for pci_alloc_coherent.
The caller just has to use GFP_DMA in this case. */
- if (mask < 0x00ffffff)
+ if (mask < DMA_24BIT_MASK)
return 0;
/* Tell the device to use SAC when IOMMU force is on. This
@@ -189,7 +193,7 @@ int dma_supported(struct device *dev, u64 mask)
SAC for these. Assume all masks <= 40 bits are of this
type. Normally this doesn't make any difference, but gives
more gentle handling of IOMMU overflow. */
- if (iommu_sac_force && (mask >= 0xffffffffffULL)) {
+ if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
printk(KERN_INFO "%s: Force SAC with mask %Lx\n", dev->bus_id,mask);
return 0;
}
@@ -266,7 +270,7 @@ __init int iommu_setup(char *p)
swiotlb = 1;
#endif
-#ifdef CONFIG_GART_IOMMU
+#ifdef CONFIG_IOMMU
gart_parse_options(p);
#endif
@@ -276,3 +280,40 @@ __init int iommu_setup(char *p)
}
return 1;
}
+__setup("iommu=", iommu_setup);
+
+void __init pci_iommu_alloc(void)
+{
+ /*
+ * The order of these functions is important for
+ * fall-back/fail-over reasons
+ */
+#ifdef CONFIG_IOMMU
+ iommu_hole_init();
+#endif
+
+#ifdef CONFIG_CALGARY_IOMMU
+ detect_calgary();
+#endif
+
+#ifdef CONFIG_SWIOTLB
+ pci_swiotlb_init();
+#endif
+}
+
+static int __init pci_iommu_init(void)
+{
+#ifdef CONFIG_CALGARY_IOMMU
+ calgary_iommu_init();
+#endif
+
+#ifdef CONFIG_IOMMU
+ gart_iommu_init();
+#endif
+
+ no_iommu_init();
+ return 0;
+}
+
+/* Must execute after PCI subsystem */
+fs_initcall(pci_iommu_init);
diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c
index 82a7c9bfdfa..6d3e61baf7a 100644
--- a/arch/x86_64/kernel/pci-gart.c
+++ b/arch/x86_64/kernel/pci-gart.c
@@ -10,7 +10,6 @@
* Copyright 2002 Andi Kleen, SuSE Labs.
*/
-#include <linux/config.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
@@ -32,6 +31,7 @@
#include <asm/kdebug.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
+#include <asm/k8.h>
unsigned long iommu_bus_base; /* GART remapping area (physical) */
static unsigned long iommu_size; /* size of remapping area bytes */
@@ -46,8 +46,6 @@ u32 *iommu_gatt_base; /* Remapping table */
also seen with Qlogic at least). */
int iommu_fullflush = 1;
-#define MAX_NB 8
-
/* Allocation bitmap for the remapping area */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */
@@ -63,13 +61,6 @@ static u32 gart_unmapped_entry;
#define to_pages(addr,size) \
(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
-#define for_all_nb(dev) \
- dev = NULL; \
- while ((dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1103, dev))!=NULL)
-
-static struct pci_dev *northbridges[MAX_NB];
-static u32 northbridge_flush_word[MAX_NB];
-
#define EMERGENCY_PAGES 32 /* = 128KB */
#ifdef CONFIG_AGP
@@ -93,7 +84,7 @@ static unsigned long alloc_iommu(int size)
offset = find_next_zero_string(iommu_gart_bitmap,next_bit,iommu_pages,size);
if (offset == -1) {
need_flush = 1;
- offset = find_next_zero_string(iommu_gart_bitmap,0,next_bit,size);
+ offset = find_next_zero_string(iommu_gart_bitmap,0,iommu_pages,size);
}
if (offset != -1) {
set_bit_string(iommu_gart_bitmap, offset, size);
@@ -120,44 +111,17 @@ static void free_iommu(unsigned long offset, int size)
/*
* Use global flush state to avoid races with multiple flushers.
*/
-static void flush_gart(struct device *dev)
+static void flush_gart(void)
{
unsigned long flags;
- int flushed = 0;
- int i, max;
-
spin_lock_irqsave(&iommu_bitmap_lock, flags);
- if (need_flush) {
- max = 0;
- for (i = 0; i < MAX_NB; i++) {
- if (!northbridges[i])
- continue;
- pci_write_config_dword(northbridges[i], 0x9c,
- northbridge_flush_word[i] | 1);
- flushed++;
- max = i;
- }
- for (i = 0; i <= max; i++) {
- u32 w;
- if (!northbridges[i])
- continue;
- /* Make sure the hardware actually executed the flush. */
- for (;;) {
- pci_read_config_dword(northbridges[i], 0x9c, &w);
- if (!(w & 1))
- break;
- cpu_relax();
- }
- }
- if (!flushed)
- printk("nothing to flush?\n");
+ if (need_flush) {
+ k8_flush_garts();
need_flush = 0;
}
spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
-
-
#ifdef CONFIG_IOMMU_LEAK
#define SET_LEAK(x) if (iommu_leak_tab) \
@@ -266,7 +230,7 @@ static dma_addr_t gart_map_simple(struct device *dev, char *buf,
size_t size, int dir)
{
dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);
- flush_gart(dev);
+ flush_gart();
return map;
}
@@ -289,6 +253,28 @@ dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir)
}
/*
+ * Free a DMA mapping.
+ */
+void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
+ size_t size, int direction)
+{
+ unsigned long iommu_page;
+ int npages;
+ int i;
+
+ if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
+ dma_addr >= iommu_bus_base + iommu_size)
+ return;
+ iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
+ npages = to_pages(dma_addr, size);
+ for (i = 0; i < npages; i++) {
+ iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
+ CLEAR_LEAK(iommu_page + i);
+ }
+ free_iommu(iommu_page, npages);
+}
+
+/*
* Wrapper for pci_unmap_single working with scatterlists.
*/
void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
@@ -299,7 +285,7 @@ void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int di
struct scatterlist *s = &sg[i];
if (!s->dma_length || !s->length)
break;
- dma_unmap_single(dev, s->dma_address, s->dma_length, dir);
+ gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
}
}
@@ -329,7 +315,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
s->dma_address = addr;
s->dma_length = s->length;
}
- flush_gart(dev);
+ flush_gart();
return nents;
}
@@ -436,13 +422,13 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
if (dma_map_cont(sg, start, i, sg+out, pages, need) < 0)
goto error;
out++;
- flush_gart(dev);
+ flush_gart();
if (out < nents)
sg[out].dma_length = 0;
return out;
error:
- flush_gart(NULL);
+ flush_gart();
gart_unmap_sg(dev, sg, nents, dir);
/* When it was forced or merged try again in a dumb way */
if (force_iommu || iommu_merge) {
@@ -458,28 +444,6 @@ error:
return 0;
}
-/*
- * Free a DMA mapping.
- */
-void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
- size_t size, int direction)
-{
- unsigned long iommu_page;
- int npages;
- int i;
-
- if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
- dma_addr >= iommu_bus_base + iommu_size)
- return;
- iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
- npages = to_pages(dma_addr, size);
- for (i = 0; i < npages; i++) {
- iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
- CLEAR_LEAK(iommu_page + i);
- }
- free_iommu(iommu_page, npages);
-}
-
static int no_agp;
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
@@ -532,10 +496,13 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
void *gatt;
unsigned aper_base, new_aper_base;
unsigned aper_size, gatt_size, new_aper_size;
-
+ int i;
+
printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
aper_size = aper_base = info->aper_size = 0;
- for_all_nb(dev) {
+ dev = NULL;
+ for (i = 0; i < num_k8_northbridges; i++) {
+ dev = k8_northbridges[i];
new_aper_base = read_aperture(dev, &new_aper_size);
if (!new_aper_base)
goto nommu;
@@ -558,11 +525,12 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
panic("Cannot allocate GATT table");
memset(gatt, 0, gatt_size);
agp_gatt_table = gatt;
-
- for_all_nb(dev) {
+
+ for (i = 0; i < num_k8_northbridges; i++) {
u32 ctl;
u32 gatt_reg;
+ dev = k8_northbridges[i];
gatt_reg = __pa(gatt) >> 12;
gatt_reg <<= 4;
pci_write_config_dword(dev, 0x98, gatt_reg);
@@ -573,7 +541,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
pci_write_config_dword(dev, 0x90, ctl);
}
- flush_gart(NULL);
+ flush_gart();
printk("PCI-DMA: aperture base @ %x size %u KB\n",aper_base, aper_size>>10);
return 0;
@@ -602,15 +570,19 @@ static struct dma_mapping_ops gart_dma_ops = {
.unmap_sg = gart_unmap_sg,
};
-static int __init pci_iommu_init(void)
+void __init gart_iommu_init(void)
{
struct agp_kern_info info;
unsigned long aper_size;
unsigned long iommu_start;
- struct pci_dev *dev;
unsigned long scratch;
long i;
+ if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
+ printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
+ return;
+ }
+
#ifndef CONFIG_AGP_AMD64
no_agp = 1;
#else
@@ -622,7 +594,11 @@ static int __init pci_iommu_init(void)
#endif
if (swiotlb)
- return -1;
+ return;
+
+ /* Did we detect a different HW IOMMU? */
+ if (iommu_detected && !iommu_aperture)
+ return;
if (no_iommu ||
(!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
@@ -634,15 +610,7 @@ static int __init pci_iommu_init(void)
"but IOMMU not available.\n"
KERN_ERR "WARNING 32bit PCI may malfunction.\n");
}
- return -1;
- }
-
- i = 0;
- for_all_nb(dev)
- i++;
- if (i > MAX_NB) {
- printk(KERN_ERR "PCI-GART: Too many northbridges (%ld). Disabled\n", i);
- return -1;
+ return;
}
printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
@@ -707,26 +675,10 @@ static int __init pci_iommu_init(void)
for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
iommu_gatt_base[i] = gart_unmapped_entry;
- for_all_nb(dev) {
- u32 flag;
- int cpu = PCI_SLOT(dev->devfn) - 24;
- if (cpu >= MAX_NB)
- continue;
- northbridges[cpu] = dev;
- pci_read_config_dword(dev, 0x9c, &flag); /* cache flush word */
- northbridge_flush_word[cpu] = flag;
- }
-
- flush_gart(NULL);
-
+ flush_gart();
dma_ops = &gart_dma_ops;
-
- return 0;
}
-/* Must execute after PCI subsystem */
-fs_initcall(pci_iommu_init);
-
void gart_parse_options(char *p)
{
int arg;
diff --git a/arch/x86_64/kernel/pci-nommu.c b/arch/x86_64/kernel/pci-nommu.c
index 1f6ecc62061..c4c3cc36ac5 100644
--- a/arch/x86_64/kernel/pci-nommu.c
+++ b/arch/x86_64/kernel/pci-nommu.c
@@ -4,6 +4,8 @@
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/string.h>
+#include <linux/dma-mapping.h>
+
#include <asm/proto.h>
#include <asm/processor.h>
#include <asm/dma.h>
@@ -12,10 +14,11 @@ static int
check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{
if (hwdev && bus + size > *hwdev->dma_mask) {
- if (*hwdev->dma_mask >= 0xffffffffULL)
+ if (*hwdev->dma_mask >= DMA_32BIT_MASK)
printk(KERN_ERR
- "nommu_%s: overflow %Lx+%lu of device mask %Lx\n",
- name, (long long)bus, size, (long long)*hwdev->dma_mask);
+ "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
+ name, (long long)bus, size,
+ (long long)*hwdev->dma_mask);
return 0;
}
return 1;
diff --git a/arch/x86_64/kernel/pci-swiotlb.c b/arch/x86_64/kernel/pci-swiotlb.c
index 990ed67896f..ebdb77fe205 100644
--- a/arch/x86_64/kernel/pci-swiotlb.c
+++ b/arch/x86_64/kernel/pci-swiotlb.c
@@ -31,7 +31,7 @@ struct dma_mapping_ops swiotlb_dma_ops = {
void pci_swiotlb_init(void)
{
/* don't initialize swiotlb if iommu=off (no_iommu=1) */
- if (!iommu_aperture && !no_iommu &&
+ if (!iommu_detected && !no_iommu &&
(end_pfn > MAX_DMA32_PFN || force_iommu))
swiotlb = 1;
if (swiotlb) {
diff --git a/arch/x86_64/kernel/pmtimer.c b/arch/x86_64/kernel/pmtimer.c
index bf421ed2680..7554458dc9c 100644
--- a/arch/x86_64/kernel/pmtimer.c
+++ b/arch/x86_64/kernel/pmtimer.c
@@ -27,7 +27,7 @@
/* The I/O port the PMTMR resides at.
* The location is detected during setup_arch(),
* in arch/i386/kernel/acpi/boot.c */
-u32 pmtmr_ioport;
+u32 pmtmr_ioport __read_mostly;
/* value of the Power timer at last timer interrupt */
static u32 offset_delay;
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index fb903e65e07..ca56e19b8b6 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -10,7 +10,6 @@
* Andi Kleen.
*
* CPU hotplug support - ashok.raj@intel.com
- * $Id: process.c,v 1.38 2002/01/15 10:08:03 ak Exp $
*/
/*
@@ -64,6 +63,7 @@ EXPORT_SYMBOL(boot_option_idle_override);
* Powermanagement idle function, if any..
*/
void (*pm_idle)(void);
+EXPORT_SYMBOL(pm_idle);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
static ATOMIC_NOTIFIER_HEAD(idle_notifier);
@@ -111,7 +111,7 @@ static void default_idle(void)
{
local_irq_enable();
- clear_thread_flag(TIF_POLLING_NRFLAG);
+ current_thread_info()->status &= ~TS_POLLING;
smp_mb__after_clear_bit();
while (!need_resched()) {
local_irq_disable();
@@ -120,7 +120,7 @@ static void default_idle(void)
else
local_irq_enable();
}
- set_thread_flag(TIF_POLLING_NRFLAG);
+ current_thread_info()->status |= TS_POLLING;
}
/*
@@ -203,8 +203,7 @@ static inline void play_dead(void)
*/
void cpu_idle (void)
{
- set_thread_flag(TIF_POLLING_NRFLAG);
-
+ current_thread_info()->status |= TS_POLLING;
/* endless idle loop with no priority at all */
while (1) {
while (!need_resched()) {
@@ -335,7 +334,7 @@ void show_regs(struct pt_regs *regs)
{
printk("CPU %d:", smp_processor_id());
__show_regs(regs);
- show_trace(&regs->rsp);
+ show_trace(NULL, regs, (void *)(regs + 1));
}
/*
@@ -365,8 +364,11 @@ void flush_thread(void)
struct task_struct *tsk = current;
struct thread_info *t = current_thread_info();
- if (t->flags & _TIF_ABI_PENDING)
+ if (t->flags & _TIF_ABI_PENDING) {
t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);
+ if (t->flags & _TIF_IA32)
+ current_thread_info()->status |= TS_COMPAT;
+ }
tsk->thread.debugreg0 = 0;
tsk->thread.debugreg1 = 0;
diff --git a/arch/x86_64/kernel/reboot.c b/arch/x86_64/kernel/reboot.c
index 57117b8beb2..2d676984745 100644
--- a/arch/x86_64/kernel/reboot.c
+++ b/arch/x86_64/kernel/reboot.c
@@ -20,6 +20,7 @@
* Power off function, if any
*/
void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
static long no_idt[3];
static enum {
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 655b9192eeb..0925518b58d 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -5,8 +5,6 @@
*
* Nov 2001 Dave Jones <davej@suse.de>
* Forked from i386 setup code.
- *
- * $Id$
*/
/*
@@ -26,7 +24,6 @@
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
-#include <linux/config.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
@@ -65,9 +62,7 @@
#include <asm/setup.h>
#include <asm/mach_apic.h>
#include <asm/numa.h>
-#include <asm/swiotlb.h>
#include <asm/sections.h>
-#include <asm/gart-mapping.h>
#include <asm/dmi.h>
/*
@@ -75,6 +70,7 @@
*/
struct cpuinfo_x86 boot_cpu_data __read_mostly;
+EXPORT_SYMBOL(boot_cpu_data);
unsigned long mmu_cr4_features;
@@ -103,12 +99,14 @@ char dmi_alloc_data[DMI_MAX_DATA];
* Setup options
*/
struct screen_info screen_info;
+EXPORT_SYMBOL(screen_info);
struct sys_desc_table_struct {
unsigned short length;
unsigned char table[0];
};
struct edid_info edid_info;
+EXPORT_SYMBOL_GPL(edid_info);
struct e820map e820;
extern int root_mountflags;
@@ -473,80 +471,6 @@ contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
}
#endif
-/* Use inline assembly to define this because the nops are defined
- as inline assembly strings in the include files and we cannot
- get them easily into strings. */
-asm("\t.data\nk8nops: "
- K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
- K8_NOP7 K8_NOP8);
-
-extern unsigned char k8nops[];
-static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
- NULL,
- k8nops,
- k8nops + 1,
- k8nops + 1 + 2,
- k8nops + 1 + 2 + 3,
- k8nops + 1 + 2 + 3 + 4,
- k8nops + 1 + 2 + 3 + 4 + 5,
- k8nops + 1 + 2 + 3 + 4 + 5 + 6,
- k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
-};
-
-extern char __vsyscall_0;
-
-/* Replace instructions with better alternatives for this CPU type.
-
- This runs before SMP is initialized to avoid SMP problems with
- self modifying code. This implies that assymetric systems where
- APs have less capabilities than the boot processor are not handled.
- In this case boot with "noreplacement". */
-void apply_alternatives(void *start, void *end)
-{
- struct alt_instr *a;
- int diff, i, k;
- for (a = start; (void *)a < end; a++) {
- u8 *instr;
-
- if (!boot_cpu_has(a->cpuid))
- continue;
-
- BUG_ON(a->replacementlen > a->instrlen);
- instr = a->instr;
- /* vsyscall code is not mapped yet. resolve it manually. */
- if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END)
- instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
- __inline_memcpy(instr, a->replacement, a->replacementlen);
- diff = a->instrlen - a->replacementlen;
-
- /* Pad the rest with nops */
- for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
- k = diff;
- if (k > ASM_NOP_MAX)
- k = ASM_NOP_MAX;
- __inline_memcpy(instr + i, k8_nops[k], k);
- }
- }
-}
-
-static int no_replacement __initdata = 0;
-
-void __init alternative_instructions(void)
-{
- extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
- if (no_replacement)
- return;
- apply_alternatives(__alt_instructions, __alt_instructions_end);
-}
-
-static int __init noreplacement_setup(char *s)
-{
- no_replacement = 1;
- return 1;
-}
-
-__setup("noreplacement", noreplacement_setup);
-
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
@@ -728,7 +652,7 @@ void __init setup_arch(char **cmdline_p)
#endif
#ifdef CONFIG_KEXEC
if (crashk_res.start != crashk_res.end) {
- reserve_bootmem(crashk_res.start,
+ reserve_bootmem_generic(crashk_res.start,
crashk_res.end - crashk_res.start + 1);
}
#endif
@@ -779,10 +703,6 @@ void __init setup_arch(char **cmdline_p)
e820_setup_gap();
-#ifdef CONFIG_GART_IOMMU
- iommu_hole_init();
-#endif
-
#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
conswitchp = &vga_con;
@@ -867,24 +787,32 @@ static int nearby_node(int apicid)
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
- int cpu = smp_processor_id();
unsigned bits;
#ifdef CONFIG_NUMA
+ int cpu = smp_processor_id();
int node = 0;
unsigned apicid = hard_smp_processor_id();
#endif
+ unsigned ecx = cpuid_ecx(0x80000008);
+
+ c->x86_max_cores = (ecx & 0xff) + 1;
- bits = 0;
- while ((1 << bits) < c->x86_max_cores)
- bits++;
+ /* CPU telling us the core id bits shift? */
+ bits = (ecx >> 12) & 0xF;
+
+ /* Otherwise recompute */
+ if (bits == 0) {
+ while ((1 << bits) < c->x86_max_cores)
+ bits++;
+ }
/* Low order bits define the core id (index of core in socket) */
- cpu_core_id[cpu] = phys_proc_id[cpu] & ((1 << bits)-1);
+ c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
/* Convert the APIC ID into the socket ID */
- phys_proc_id[cpu] = phys_pkg_id(bits);
+ c->phys_proc_id = phys_pkg_id(bits);
#ifdef CONFIG_NUMA
- node = phys_proc_id[cpu];
+ node = c->phys_proc_id;
if (apicid_to_node[apicid] != NUMA_NO_NODE)
node = apicid_to_node[apicid];
if (!node_online(node)) {
@@ -897,7 +825,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
but in the same order as the HT nodeids.
If that doesn't result in a usable node fall back to the
path for the previous case. */
- int ht_nodeid = apicid - (phys_proc_id[0] << bits);
+ int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits);
if (ht_nodeid >= 0 &&
apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
node = apicid_to_node[ht_nodeid];
@@ -907,15 +835,13 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
}
numa_set_node(cpu, node);
- printk(KERN_INFO "CPU %d/%x(%d) -> Node %d -> Core %d\n",
- cpu, apicid, c->x86_max_cores, node, cpu_core_id[cpu]);
+ printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
#endif
}
-static int __init init_amd(struct cpuinfo_x86 *c)
+static void __init init_amd(struct cpuinfo_x86 *c)
{
- int r;
unsigned level;
#ifdef CONFIG_SMP
@@ -948,8 +874,8 @@ static int __init init_amd(struct cpuinfo_x86 *c)
if (c->x86 >= 6)
set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);
- r = get_model_name(c);
- if (!r) {
+ level = get_model_name(c);
+ if (!level) {
switch (c->x86) {
case 15:
/* Should distinguish Models here, but this is only
@@ -964,13 +890,12 @@ static int __init init_amd(struct cpuinfo_x86 *c)
if (c->x86_power & (1<<8))
set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
- if (c->extended_cpuid_level >= 0x80000008) {
- c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
-
+ /* Multi core CPU? */
+ if (c->extended_cpuid_level >= 0x80000008)
amd_detect_cmp(c);
- }
- return r;
+ /* Fix cpuid4 emulation for more */
+ num_cache_leaves = 3;
}
static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
@@ -978,13 +903,14 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
#ifdef CONFIG_SMP
u32 eax, ebx, ecx, edx;
int index_msb, core_bits;
- int cpu = smp_processor_id();
cpuid(1, &eax, &ebx, &ecx, &edx);
- if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
+ if (!cpu_has(c, X86_FEATURE_HT))
return;
+ if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
+ goto out;
smp_num_siblings = (ebx & 0xff0000) >> 16;
@@ -999,10 +925,7 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
}
index_msb = get_count_order(smp_num_siblings);
- phys_proc_id[cpu] = phys_pkg_id(index_msb);
-
- printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
- phys_proc_id[cpu]);
+ c->phys_proc_id = phys_pkg_id(index_msb);
smp_num_siblings = smp_num_siblings / c->x86_max_cores;
@@ -1010,13 +933,15 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
core_bits = get_count_order(c->x86_max_cores);
- cpu_core_id[cpu] = phys_pkg_id(index_msb) &
+ c->cpu_core_id = phys_pkg_id(index_msb) &
((1 << core_bits) - 1);
-
- if (c->x86_max_cores > 1)
- printk(KERN_INFO "CPU: Processor Core ID: %d\n",
- cpu_core_id[cpu]);
}
+out:
+ if ((c->x86_max_cores * smp_num_siblings) > 1) {
+ printk(KERN_INFO "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
+ printk(KERN_INFO "CPU: Processor Core ID: %d\n", c->cpu_core_id);
+ }
+
#endif
}
@@ -1025,15 +950,12 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
*/
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
- unsigned int eax;
+ unsigned int eax, t;
if (c->cpuid_level < 4)
return 1;
- __asm__("cpuid"
- : "=a" (eax)
- : "0" (4), "c" (0)
- : "bx", "dx");
+ cpuid_count(4, 0, &eax, &t, &t, &t);
if (eax & 0x1f)
return ((eax >> 26) + 1);
@@ -1046,16 +968,17 @@ static void srat_detect_node(void)
#ifdef CONFIG_NUMA
unsigned node;
int cpu = smp_processor_id();
+ int apicid = hard_smp_processor_id();
/* Don't do the funky fallback heuristics the AMD version employs
for now. */
- node = apicid_to_node[hard_smp_processor_id()];
+ node = apicid_to_node[apicid];
if (node == NUMA_NO_NODE)
node = first_node(node_online_map);
numa_set_node(cpu, node);
if (acpi_numa > 0)
- printk(KERN_INFO "CPU %d -> Node %d\n", cpu, node);
+ printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}
@@ -1065,6 +988,13 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
unsigned n;
init_intel_cacheinfo(c);
+ if (c->cpuid_level > 9 ) {
+ unsigned eax = cpuid_eax(10);
+ /* Check for version and the number of counters */
+ if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
+ set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
+ }
+
n = c->extended_cpuid_level;
if (n >= 0x80000008) {
unsigned eax = cpuid_eax(0x80000008);
@@ -1156,7 +1086,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
}
#ifdef CONFIG_SMP
- phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
+ c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}
@@ -1283,7 +1213,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
- NULL, "fxsr_opt", "rdtscp", NULL, NULL, "lm", "3dnowext", "3dnow",
+ NULL, "fxsr_opt", NULL, "rdtscp", NULL, "lm", "3dnowext", "3dnow",
/* Transmeta-defined */
"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
@@ -1294,7 +1224,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
/* Other (Linux-defined) */
"cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
"constant_tsc", NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ "up", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
@@ -1364,9 +1294,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
#ifdef CONFIG_SMP
if (smp_num_siblings * c->x86_max_cores > 1) {
int cpu = c - cpu_data;
- seq_printf(m, "physical id\t: %d\n", phys_proc_id[cpu]);
+ seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
- seq_printf(m, "core id\t\t: %d\n", cpu_core_id[cpu]);
+ seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
}
#endif
@@ -1440,7 +1370,7 @@ struct seq_operations cpuinfo_op = {
.show = show_cpuinfo,
};
-#ifdef CONFIG_INPUT_PCSPKR
+#if defined(CONFIG_INPUT_PCSPKR) || defined(CONFIG_INPUT_PCSPKR_MODULE)
#include <linux/platform_device.h>
static __init int add_pcspkr(void)
{
diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c
index 8a691fa6d39..6fe58a634b5 100644
--- a/arch/x86_64/kernel/setup64.c
+++ b/arch/x86_64/kernel/setup64.c
@@ -3,9 +3,7 @@
* Copyright (C) 1995 Linus Torvalds
* Copyright 2001, 2002, 2003 SuSE Labs / Andi Kleen.
* See setup.c for older changelog.
- * $Id: setup64.c,v 1.12 2002/03/21 10:09:17 ak Exp $
*/
-#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -31,6 +29,7 @@ char x86_boot_params[BOOT_PARAM_SIZE] __initdata = {0,};
cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(_cpu_pda);
struct x8664_pda boot_cpu_pda[NR_CPUS] __cacheline_aligned;
struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
@@ -38,6 +37,7 @@ struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));
unsigned long __supported_pte_mask __read_mostly = ~0UL;
+EXPORT_SYMBOL(__supported_pte_mask);
static int do_not_nx __cpuinitdata = 0;
/* noexec=on|off
diff --git a/arch/x86_64/kernel/signal.c b/arch/x86_64/kernel/signal.c
index e5f5ce7909a..28161170fb0 100644
--- a/arch/x86_64/kernel/signal.c
+++ b/arch/x86_64/kernel/signal.c
@@ -7,8 +7,6 @@
* 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
* 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes
* 2000-2002 x86-64 support by Andi Kleen
- *
- * $Id: signal.c,v 1.18 2001/10/17 22:30:37 ak Exp $
*/
#include <linux/sched.h>
@@ -239,7 +237,6 @@ get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size)
rsp = regs->rsp - 128;
/* This is the X/Open sanctioned signal stack switching. */
- /* RED-PEN: redzone on that stack? */
if (ka->sa.sa_flags & SA_ONSTACK) {
if (sas_ss_flags(rsp) == 0)
rsp = current->sas_ss_sp + current->sas_ss_size;
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index 4a6628b14d9..5a1c0a3bf87 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -135,10 +135,10 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
cpu = smp_processor_id();
/*
- * orig_rax contains the interrupt vector - 256.
+ * orig_rax contains the negated interrupt vector.
* Use that to determine where the sender put the data.
*/
- sender = regs->orig_rax + 256 - INVALIDATE_TLB_VECTOR_START;
+ sender = ~regs->orig_rax - INVALIDATE_TLB_VECTOR_START;
f = &per_cpu(flush_state, sender);
if (!cpu_isset(cpu, f->flush_cpumask))
@@ -224,6 +224,7 @@ void flush_tlb_current_task(void)
flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
preempt_enable();
}
+EXPORT_SYMBOL(flush_tlb_current_task);
void flush_tlb_mm (struct mm_struct * mm)
{
@@ -244,6 +245,7 @@ void flush_tlb_mm (struct mm_struct * mm)
preempt_enable();
}
+EXPORT_SYMBOL(flush_tlb_mm);
void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
{
@@ -266,6 +268,7 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
preempt_enable();
}
+EXPORT_SYMBOL(flush_tlb_page);
static void do_flush_tlb_all(void* info)
{
@@ -443,6 +446,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
spin_unlock(&call_lock);
return 0;
}
+EXPORT_SYMBOL(smp_call_function);
void smp_stop_cpu(void)
{
@@ -460,7 +464,7 @@ static void smp_really_stop_cpu(void *dummy)
{
smp_stop_cpu();
for (;;)
- asm("hlt");
+ halt();
}
void smp_send_stop(void)
@@ -470,7 +474,7 @@ void smp_send_stop(void)
return;
/* Don't deadlock on the call lock in panic */
if (!spin_trylock(&call_lock)) {
- /* ignore locking because we have paniced anyways */
+ /* ignore locking because we have panicked anyways */
nolock = 1;
}
__smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
@@ -520,13 +524,13 @@ asmlinkage void smp_call_function_interrupt(void)
int safe_smp_processor_id(void)
{
- int apicid, i;
+ unsigned apicid, i;
if (disable_apic)
return 0;
apicid = hard_smp_processor_id();
- if (x86_cpu_to_apicid[apicid] == apicid)
+ if (apicid < NR_CPUS && x86_cpu_to_apicid[apicid] == apicid)
return apicid;
for (i = 0; i < NR_CPUS; ++i) {
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 71a7222cf9c..9705a6a384f 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -38,7 +38,6 @@
*/
-#include <linux/config.h>
#include <linux/init.h>
#include <linux/mm.h>
@@ -63,13 +62,11 @@
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
-/* Package ID of each logical CPU */
-u8 phys_proc_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
-/* core ID of each logical CPU */
-u8 cpu_core_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
+EXPORT_SYMBOL(smp_num_siblings);
/* Last level cache ID of each logical CPU */
u8 cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
+EXPORT_SYMBOL(cpu_llc_id);
/* Bitmask of currently online CPUs */
cpumask_t cpu_online_map __read_mostly;
@@ -82,18 +79,21 @@ EXPORT_SYMBOL(cpu_online_map);
*/
cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
+EXPORT_SYMBOL(cpu_callout_map);
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);
/* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
+EXPORT_SYMBOL(cpu_data);
/* Set when the idlers are all forked */
int smp_threads_ready;
/* representing HT siblings of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_sibling_map);
/* representing HT and core siblings of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
@@ -454,10 +454,12 @@ cpumask_t cpu_coregroup_map(int cpu)
struct cpuinfo_x86 *c = cpu_data + cpu;
/*
* For perf, we return last level cache shared map.
- * TBD: when power saving sched policy is added, we will return
- * cpu_core_map when power saving policy is enabled
+ * And for power savings, we return cpu_core_map
*/
- return c->llc_shared_map;
+ if (sched_mc_power_savings || sched_smt_power_savings)
+ return cpu_core_map[cpu];
+ else
+ return c->llc_shared_map;
}
/* representing cpus for which sibling maps can be computed */
@@ -472,8 +474,8 @@ static inline void set_cpu_sibling_map(int cpu)
if (smp_num_siblings > 1) {
for_each_cpu_mask(i, cpu_sibling_setup_map) {
- if (phys_proc_id[cpu] == phys_proc_id[i] &&
- cpu_core_id[cpu] == cpu_core_id[i]) {
+ if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
+ c[cpu].cpu_core_id == c[i].cpu_core_id) {
cpu_set(i, cpu_sibling_map[cpu]);
cpu_set(cpu, cpu_sibling_map[i]);
cpu_set(i, cpu_core_map[cpu]);
@@ -500,7 +502,7 @@ static inline void set_cpu_sibling_map(int cpu)
cpu_set(i, c[cpu].llc_shared_map);
cpu_set(cpu, c[i].llc_shared_map);
}
- if (phys_proc_id[cpu] == phys_proc_id[i]) {
+ if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
cpu_set(i, cpu_core_map[cpu]);
cpu_set(cpu, cpu_core_map[i]);
/*
@@ -797,6 +799,8 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
}
+ alternatives_smp_switch(1);
+
c_idle.idle = get_idle_for_cpu(cpu);
if (c_idle.idle) {
@@ -1199,8 +1203,8 @@ static void remove_siblinginfo(int cpu)
cpu_clear(cpu, cpu_sibling_map[sibling]);
cpus_clear(cpu_sibling_map[cpu]);
cpus_clear(cpu_core_map[cpu]);
- phys_proc_id[cpu] = BAD_APICID;
- cpu_core_id[cpu] = BAD_APICID;
+ c[cpu].phys_proc_id = 0;
+ c[cpu].cpu_core_id = 0;
cpu_clear(cpu, cpu_sibling_setup_map);
}
@@ -1259,6 +1263,8 @@ void __cpu_die(unsigned int cpu)
/* They ack this in play_dead by setting CPU_DEAD */
if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
printk ("CPU %d is now offline\n", cpu);
+ if (1 == num_online_cpus())
+ alternatives_smp_switch(0);
return;
}
msleep(100);
diff --git a/arch/x86_64/kernel/suspend.c b/arch/x86_64/kernel/suspend.c
index ecbd34c1093..91f7e678bae 100644
--- a/arch/x86_64/kernel/suspend.c
+++ b/arch/x86_64/kernel/suspend.c
@@ -7,7 +7,6 @@
* Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
*/
-#include <linux/config.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <asm/proto.h>
diff --git a/arch/x86_64/kernel/syscall.c b/arch/x86_64/kernel/syscall.c
index 7c176b3edde..213fd6ab789 100644
--- a/arch/x86_64/kernel/syscall.c
+++ b/arch/x86_64/kernel/syscall.c
@@ -3,7 +3,6 @@
#include <linux/linkage.h>
#include <linux/sys.h>
#include <linux/cache.h>
-#include <linux/config.h>
#define __NO_STUBS
diff --git a/arch/x86_64/kernel/tce.c b/arch/x86_64/kernel/tce.c
new file mode 100644
index 00000000000..8d4c67f61b8
--- /dev/null
+++ b/arch/x86_64/kernel/tce.c
@@ -0,0 +1,202 @@
+/*
+ * Derived from arch/powerpc/platforms/pseries/iommu.c
+ *
+ * Copyright (C) 2006 Jon Mason <jdmason@us.ibm.com>, IBM Corporation
+ * Copyright (C) 2006 Muli Ben-Yehuda <muli@il.ibm.com>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/bootmem.h>
+#include <asm/tce.h>
+#include <asm/calgary.h>
+#include <asm/proto.h>
+
+/* flush a tce at 'tceaddr' to main memory */
+static inline void flush_tce(void* tceaddr)
+{
+ /* a single tce can't cross a cache line */
+ if (cpu_has_clflush)
+ asm volatile("clflush (%0)" :: "r" (tceaddr));
+ else
+ asm volatile("wbinvd":::"memory");
+}
+
+void tce_build(struct iommu_table *tbl, unsigned long index,
+ unsigned int npages, unsigned long uaddr, int direction)
+{
+ u64* tp;
+ u64 t;
+ u64 rpn;
+
+ t = (1 << TCE_READ_SHIFT);
+ if (direction != DMA_TO_DEVICE)
+ t |= (1 << TCE_WRITE_SHIFT);
+
+ tp = ((u64*)tbl->it_base) + index;
+
+ while (npages--) {
+ rpn = (virt_to_bus((void*)uaddr)) >> PAGE_SHIFT;
+ t &= ~TCE_RPN_MASK;
+ t |= (rpn << TCE_RPN_SHIFT);
+
+ *tp = cpu_to_be64(t);
+ flush_tce(tp);
+
+ uaddr += PAGE_SIZE;
+ tp++;
+ }
+}
+
+void tce_free(struct iommu_table *tbl, long index, unsigned int npages)
+{
+ u64* tp;
+
+ tp = ((u64*)tbl->it_base) + index;
+
+ while (npages--) {
+ *tp = cpu_to_be64(0);
+ flush_tce(tp);
+ tp++;
+ }
+}
+
+static inline unsigned int table_size_to_number_of_entries(unsigned char size)
+{
+ /*
+ * size is the order of the table, 0-7
+ * smallest table is 8K entries, so shift result by 13 to
+ * multiply by 8K
+ */
+ return (1 << size) << 13;
+}
+
+static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl)
+{
+ unsigned int bitmapsz;
+ unsigned int tce_table_index;
+ unsigned long bmppages;
+ int ret;
+
+ tbl->it_busno = dev->bus->number;
+
+ /* set the tce table size - measured in entries */
+ tbl->it_size = table_size_to_number_of_entries(specified_table_size);
+
+ tce_table_index = bus_to_phb(tbl->it_busno);
+ tbl->it_base = (unsigned long)tce_table_kva[tce_table_index];
+ if (!tbl->it_base) {
+ printk(KERN_ERR "Calgary: iommu_table_setparms: "
+ "no table allocated?!\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ /*
+ * number of bytes needed for the bitmap size in number of
+ * entries; we need one bit per entry
+ */
+ bitmapsz = tbl->it_size / BITS_PER_BYTE;
+ bmppages = __get_free_pages(GFP_KERNEL, get_order(bitmapsz));
+ if (!bmppages) {
+ printk(KERN_ERR "Calgary: cannot allocate bitmap\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ tbl->it_map = (unsigned long*)bmppages;
+
+ memset(tbl->it_map, 0, bitmapsz);
+
+ tbl->it_hint = 0;
+
+ spin_lock_init(&tbl->it_lock);
+
+ return 0;
+
+done:
+ return ret;
+}
+
+int build_tce_table(struct pci_dev *dev, void __iomem *bbar)
+{
+ struct iommu_table *tbl;
+ int ret;
+
+ if (dev->sysdata) {
+ printk(KERN_ERR "Calgary: dev %p has sysdata %p\n",
+ dev, dev->sysdata);
+ BUG();
+ }
+
+ tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL);
+ if (!tbl) {
+ printk(KERN_ERR "Calgary: error allocating iommu_table\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ ret = tce_table_setparms(dev, tbl);
+ if (ret)
+ goto free_tbl;
+
+ tce_free(tbl, 0, tbl->it_size);
+
+ tbl->bbar = bbar;
+
+ /*
+ * NUMA is already using the bus's sysdata pointer, so we use
+ * the bus's pci_dev's sysdata instead.
+ */
+ dev->sysdata = tbl;
+
+ return 0;
+
+free_tbl:
+ kfree(tbl);
+done:
+ return ret;
+}
+
+void* alloc_tce_table(void)
+{
+ unsigned int size;
+
+ size = table_size_to_number_of_entries(specified_table_size);
+ size *= TCE_ENTRY_SIZE;
+
+ return __alloc_bootmem_low(size, size, 0);
+}
+
+void free_tce_table(void *tbl)
+{
+ unsigned int size;
+
+ if (!tbl)
+ return;
+
+ size = table_size_to_number_of_entries(specified_table_size);
+ size *= TCE_ENTRY_SIZE;
+
+ free_bootmem(__pa(tbl), size);
+}
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 7392570f975..b9ff75992c1 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -8,7 +8,7 @@
* Copyright (c) 1995 Markus Kuhn
* Copyright (c) 1996 Ingo Molnar
* Copyright (c) 1998 Andrea Arcangeli
- * Copyright (c) 2002 Vojtech Pavlik
+ * Copyright (c) 2002,2006 Vojtech Pavlik
* Copyright (c) 2003 Andi Kleen
* RTC support code taken from arch/i386/kernel/timers/time_hpet.c
*/
@@ -51,14 +51,21 @@ extern int using_apic_timer;
static char *time_init_gtod(void);
DEFINE_SPINLOCK(rtc_lock);
+EXPORT_SYMBOL(rtc_lock);
DEFINE_SPINLOCK(i8253_lock);
int nohpet __initdata = 0;
static int notsc __initdata = 0;
-#undef HPET_HACK_ENABLE_DANGEROUS
+#define USEC_PER_TICK (USEC_PER_SEC / HZ)
+#define NSEC_PER_TICK (NSEC_PER_SEC / HZ)
+#define FSEC_PER_TICK (FSEC_PER_SEC / HZ)
+
+#define NS_SCALE 10 /* 2^10, carefully chosen */
+#define US_SCALE 32 /* 2^32, arbitralrily chosen */
unsigned int cpu_khz; /* TSC clocks / usec, not used here */
+EXPORT_SYMBOL(cpu_khz);
static unsigned long hpet_period; /* fsecs / HPET clock */
unsigned long hpet_tick; /* HPET clocks / interrupt */
int hpet_use_timer; /* Use the HPET counter for timekeeping, otherwise the PIT */
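
The new scale macros are binary fixed point: a quotient is precomputed shifted left by the scale, so the hot path replaces a division with a multiply and a shift. A sketch with a made-up 2 GHz TSC (cpu_khz = 2000000):

    /*
     * tsc_quot = (USEC_PER_MSEC << US_SCALE) / cpu_khz
     *          = (1000 << 32) / 2000000 = 2147483   (0.0005 usec/cycle)
     * usecs    = (tsc_delta * tsc_quot) >> US_SCALE = tsc_delta / 2000
     */
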
@@ -90,7 +97,7 @@ static inline unsigned int do_gettimeoffset_tsc(void)
t = get_cycles_sync();
if (t < vxtime.last_tsc)
t = vxtime.last_tsc; /* hack */
- x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> 32;
+ x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> US_SCALE;
return x;
}
@@ -98,7 +105,7 @@ static inline unsigned int do_gettimeoffset_hpet(void)
{
/* cap counter read to one tick to avoid inconsistencies */
unsigned long counter = hpet_readl(HPET_COUNTER) - vxtime.last;
- return (min(counter,hpet_tick) * vxtime.quot) >> 32;
+ return (min(counter,hpet_tick) * vxtime.quot) >> US_SCALE;
}
unsigned int (*do_gettimeoffset)(void) = do_gettimeoffset_tsc;
@@ -118,7 +125,7 @@ void do_gettimeofday(struct timeval *tv)
seq = read_seqbegin(&xtime_lock);
sec = xtime.tv_sec;
- usec = xtime.tv_nsec / 1000;
+ usec = xtime.tv_nsec / NSEC_PER_USEC;
/* i386 does some correction here to keep the clock
monotonic even when ntpd is fixing drift.
@@ -129,14 +136,14 @@ void do_gettimeofday(struct timeval *tv)
in arch/x86_64/kernel/vsyscall.c and export all needed
variables in vmlinux.lds. -AK */
- t = (jiffies - wall_jiffies) * (1000000L / HZ) +
+ t = (jiffies - wall_jiffies) * USEC_PER_TICK +
do_gettimeoffset();
usec += t;
} while (read_seqretry(&xtime_lock, seq));
- tv->tv_sec = sec + usec / 1000000;
- tv->tv_usec = usec % 1000000;
+ tv->tv_sec = sec + usec / USEC_PER_SEC;
+ tv->tv_usec = usec % USEC_PER_SEC;
}
EXPORT_SYMBOL(do_gettimeofday);
@@ -157,8 +164,8 @@ int do_settimeofday(struct timespec *tv)
write_seqlock_irq(&xtime_lock);
- nsec -= do_gettimeoffset() * 1000 +
- (jiffies - wall_jiffies) * (NSEC_PER_SEC/HZ);
+ nsec -= do_gettimeoffset() * NSEC_PER_USEC +
+ (jiffies - wall_jiffies) * NSEC_PER_TICK;
wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
@@ -288,7 +295,7 @@ unsigned long long monotonic_clock(void)
this_offset = hpet_readl(HPET_COUNTER);
} while (read_seqretry(&xtime_lock, seq));
offset = (this_offset - last_offset);
- offset *= (NSEC_PER_SEC/HZ) / hpet_tick;
+ offset *= NSEC_PER_TICK / hpet_tick;
} else {
do {
seq = read_seqbegin(&xtime_lock);
@@ -297,7 +304,8 @@ unsigned long long monotonic_clock(void)
base = monotonic_base;
} while (read_seqretry(&xtime_lock, seq));
this_offset = get_cycles_sync();
- offset = (this_offset - last_offset)*1000 / cpu_khz;
+ /* FIXME: 1000 or 1000000? */
+ offset = (this_offset - last_offset)*1000 / cpu_khz;
}
return base + offset;
}
@@ -382,7 +390,7 @@ void main_timer_handler(struct pt_regs *regs)
}
monotonic_base +=
- (offset - vxtime.last)*(NSEC_PER_SEC/HZ) / hpet_tick;
+ (offset - vxtime.last) * NSEC_PER_TICK / hpet_tick;
vxtime.last = offset;
#ifdef CONFIG_X86_PM_TIMER
@@ -391,24 +399,25 @@ void main_timer_handler(struct pt_regs *regs)
#endif
} else {
offset = (((tsc - vxtime.last_tsc) *
- vxtime.tsc_quot) >> 32) - (USEC_PER_SEC / HZ);
+ vxtime.tsc_quot) >> US_SCALE) - USEC_PER_TICK;
if (offset < 0)
offset = 0;
- if (offset > (USEC_PER_SEC / HZ)) {
- lost = offset / (USEC_PER_SEC / HZ);
- offset %= (USEC_PER_SEC / HZ);
+ if (offset > USEC_PER_TICK) {
+ lost = offset / USEC_PER_TICK;
+ offset %= USEC_PER_TICK;
}
- monotonic_base += (tsc - vxtime.last_tsc)*1000000/cpu_khz ;
+ /* FIXME: 1000 or 1000000? */
+ monotonic_base += (tsc - vxtime.last_tsc) * 1000000 / cpu_khz;
vxtime.last_tsc = tsc - vxtime.quot * delay / vxtime.tsc_quot;
if ((((tsc - vxtime.last_tsc) *
- vxtime.tsc_quot) >> 32) < offset)
+ vxtime.tsc_quot) >> US_SCALE) < offset)
vxtime.last_tsc = tsc -
- (((long) offset << 32) / vxtime.tsc_quot) - 1;
+ (((long) offset << US_SCALE) / vxtime.tsc_quot) - 1;
}
if (lost > 0) {
@@ -468,16 +477,15 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
}
static unsigned int cyc2ns_scale __read_mostly;
-#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
static inline void set_cyc2ns_scale(unsigned long cpu_khz)
{
- cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
+ cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / cpu_khz;
}
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
- return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
+ return (cyc * cyc2ns_scale) >> NS_SCALE;
}
unsigned long long sched_clock(void)
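
Same trick with the smaller NS_SCALE: cycles_2_ns() multiplies full 64-bit cycle counts, so only 10 bits of shift are spent to keep the product from overflowing early. Again assuming cpu_khz = 2000000:

    /*
     * cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / cpu_khz
     *              = (1000000 << 10) / 2000000 = 512   (0.5 ns/cycle)
     * cycles_2_ns(4000000000) = (4000000000 * 512) >> 10 = 2000000000 ns
     */
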
@@ -490,7 +498,7 @@ unsigned long long sched_clock(void)
Disadvantage is a small drift between CPUs in some configurations,
but that should be tolerable. */
if (__vxtime.mode == VXTIME_HPET)
- return (hpet_readl(HPET_COUNTER) * vxtime.quot) >> 32;
+ return (hpet_readl(HPET_COUNTER) * vxtime.quot) >> US_SCALE;
#endif
/* Could do CPU core sync here. Opteron can execute rdtsc speculatively,
@@ -633,7 +641,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
if (!(freq->flags & CPUFREQ_CONST_LOOPS))
- vxtime.tsc_quot = (1000L << 32) / cpu_khz;
+ vxtime.tsc_quot = (USEC_PER_MSEC << US_SCALE) / cpu_khz;
}
set_cyc2ns_scale(cpu_khz_ref);
@@ -789,8 +797,8 @@ static int hpet_timer_stop_set_go(unsigned long tick)
if (hpet_use_timer) {
hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
HPET_TN_32BIT, HPET_T0_CFG);
- hpet_writel(hpet_tick, HPET_T0_CMP);
- hpet_writel(hpet_tick, HPET_T0_CMP); /* AK: why twice? */
+ hpet_writel(hpet_tick, HPET_T0_CMP); /* next interrupt */
+ hpet_writel(hpet_tick, HPET_T0_CMP); /* period */
cfg |= HPET_CFG_LEGACY;
}
/*
@@ -825,8 +833,7 @@ static int hpet_init(void)
if (hpet_period < 100000 || hpet_period > 100000000)
return -1;
- hpet_tick = (1000000000L * (USEC_PER_SEC / HZ) + hpet_period / 2) /
- hpet_period;
+ hpet_tick = (FSEC_PER_TICK + hpet_period / 2) / hpet_period;
hpet_use_timer = (id & HPET_ID_LEGSUP);
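
The rewritten hpet_tick expression is a round-to-nearest division (add half the divisor before dividing). Worked through for one plausible configuration, HZ=250 with the common 14.31818 MHz HPET (hpet_period ~= 69841279 fs):

    /*
     * FSEC_PER_TICK = 10^15 / 250 = 4 * 10^12 fs
     * hpet_tick = (4*10^12 + 69841279/2) / 69841279 = 57273 clocks/tick
     */
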
@@ -882,7 +889,7 @@ int __init time_setup(char *str)
}
static struct irqaction irq0 = {
- timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer", NULL, NULL
+ timer_interrupt, IRQF_DISABLED, CPU_MASK_NONE, "timer", NULL, NULL
};
void __init time_init(void)
@@ -890,18 +897,6 @@ void __init time_init(void)
char *timename;
char *gtod;
-#ifdef HPET_HACK_ENABLE_DANGEROUS
- if (!vxtime.hpet_address) {
- printk(KERN_WARNING "time.c: WARNING: Enabling HPET base "
- "manually!\n");
- outl(0x800038a0, 0xcf8);
- outl(0xff000001, 0xcfc);
- outl(0x800038a0, 0xcf8);
- vxtime.hpet_address = inl(0xcfc) & 0xfffffffe;
- printk(KERN_WARNING "time.c: WARNING: Enabled HPET "
- "at %#lx.\n", vxtime.hpet_address);
- }
-#endif
if (nohpet)
vxtime.hpet_address = 0;
@@ -912,7 +907,7 @@ void __init time_init(void)
-xtime.tv_sec, -xtime.tv_nsec);
if (!hpet_init())
- vxtime_hz = (1000000000000000L + hpet_period / 2) / hpet_period;
+ vxtime_hz = (FSEC_PER_SEC + hpet_period / 2) / hpet_period;
else
vxtime.hpet_address = 0;
@@ -941,8 +936,8 @@ void __init time_init(void)
vxtime_hz / 1000000, vxtime_hz % 1000000, timename, gtod);
printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
cpu_khz / 1000, cpu_khz % 1000);
- vxtime.quot = (1000000L << 32) / vxtime_hz;
- vxtime.tsc_quot = (1000L << 32) / cpu_khz;
+ vxtime.quot = (USEC_PER_SEC << US_SCALE) / vxtime_hz;
+ vxtime.tsc_quot = (USEC_PER_MSEC << US_SCALE) / cpu_khz;
vxtime.last_tsc = get_cycles_sync();
setup_irq(0, &irq0);
@@ -956,10 +951,10 @@ void __init time_init(void)
__cpuinit int unsynchronized_tsc(void)
{
#ifdef CONFIG_SMP
- if (oem_force_hpet_timer())
+ if (apic_is_clustered_box())
return 1;
/* Intel systems are normally all synchronized. Exceptions
- are handled in the OEM check above. */
+ are handled in the check above. */
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
return 0;
#endif
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index cea335e8746..5a5311d3de0 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -6,15 +6,12 @@
*
* Pentium III FXSR, SSE support
* Gareth Hughes <gareth@valinux.com>, May 2000
- *
- * $Id: traps.c,v 1.36 2002/03/24 11:09:10 ak Exp $
*/
/*
* 'Traps.c' handles hardware traps and faults after we have saved some
* state in 'entry.S'.
*/
-#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
@@ -31,6 +28,7 @@
#include <linux/nmi.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
+#include <linux/unwind.h>
#include <asm/system.h>
#include <asm/uaccess.h>
@@ -41,7 +39,7 @@
#include <asm/i387.h>
#include <asm/kdebug.h>
#include <asm/processor.h>
-
+#include <asm/unwind.h>
#include <asm/smp.h>
#include <asm/pgalloc.h>
#include <asm/pda.h>
@@ -71,6 +69,7 @@ asmlinkage void machine_check(void);
asmlinkage void spurious_interrupt_bug(void);
ATOMIC_NOTIFIER_HEAD(die_chain);
+EXPORT_SYMBOL(die_chain);
int register_die_notifier(struct notifier_block *nb)
{
@@ -107,7 +106,8 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
preempt_enable_no_resched();
}
-static int kstack_depth_to_print = 10;
+static int kstack_depth_to_print = 12;
+static int call_trace = 1;
#ifdef CONFIG_KALLSYMS
#include <linux/kallsyms.h>
@@ -191,6 +191,25 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
return NULL;
}
+static int show_trace_unwind(struct unwind_frame_info *info, void *context)
+{
+ int i = 11, n = 0;
+
+ while (unwind(info) == 0 && UNW_PC(info)) {
+ ++n;
+ if (i > 50) {
+ printk("\n ");
+ i = 7;
+ } else
+ i += printk(" ");
+ i += printk_address(UNW_PC(info));
+ if (arch_unw_user_mode(info))
+ break;
+ }
+ printk("\n");
+ return n;
+}
+
/*
* x86-64 can have up to three kernel stacks:
* process stack
@@ -198,15 +217,39 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
* severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
*/
-void show_trace(unsigned long *stack)
+void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * stack)
{
const unsigned cpu = safe_smp_processor_id();
unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
- int i;
+ int i = 11;
unsigned used = 0;
printk("\nCall Trace:");
+ if (!tsk)
+ tsk = current;
+
+ if (call_trace >= 0) {
+ int unw_ret = 0;
+ struct unwind_frame_info info;
+
+ if (regs) {
+ if (unwind_init_frame_info(&info, tsk, regs) == 0)
+ unw_ret = show_trace_unwind(&info, NULL);
+ } else if (tsk == current)
+ unw_ret = unwind_init_running(&info, show_trace_unwind, NULL);
+ else {
+ if (unwind_init_blocked(&info, tsk) == 0)
+ unw_ret = show_trace_unwind(&info, NULL);
+ }
+ if (unw_ret > 0) {
+ if (call_trace > 0)
+ return;
+ printk("Legacy call trace:");
+ i = 18;
+ }
+ }
+
#define HANDLE_STACK(cond) \
do while (cond) { \
unsigned long addr = *stack++; \
@@ -229,7 +272,7 @@ void show_trace(unsigned long *stack)
} \
} while (0)
- for(i = 11; ; ) {
+ for(; ; ) {
const char *id;
unsigned long *estack_end;
estack_end = in_exception_stack(cpu, (unsigned long)stack,
@@ -264,7 +307,7 @@ void show_trace(unsigned long *stack)
printk("\n");
}
-void show_stack(struct task_struct *tsk, unsigned long * rsp)
+static void _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long * rsp)
{
unsigned long *stack;
int i;
@@ -298,7 +341,12 @@ void show_stack(struct task_struct *tsk, unsigned long * rsp)
printk("%016lx ", *stack++);
touch_nmi_watchdog();
}
- show_trace((unsigned long *)rsp);
+ show_trace(tsk, regs, rsp);
+}
+
+void show_stack(struct task_struct *tsk, unsigned long * rsp)
+{
+ _show_stack(tsk, NULL, rsp);
}
/*
@@ -307,7 +355,7 @@ void show_stack(struct task_struct *tsk, unsigned long * rsp)
void dump_stack(void)
{
unsigned long dummy;
- show_trace(&dummy);
+ show_trace(NULL, NULL, &dummy);
}
EXPORT_SYMBOL(dump_stack);
@@ -334,7 +382,7 @@ void show_registers(struct pt_regs *regs)
if (in_kernel) {
printk("Stack: ");
- show_stack(NULL, (unsigned long*)rsp);
+ _show_stack(NULL, regs, (unsigned long*)rsp);
printk("\nCode: ");
if (regs->rip < PAGE_OFFSET)
@@ -383,6 +431,7 @@ void out_of_line_bug(void)
{
BUG();
}
+EXPORT_SYMBOL(out_of_line_bug);
#endif
static DEFINE_SPINLOCK(die_lock);
@@ -1012,3 +1061,14 @@ static int __init kstack_setup(char *s)
}
__setup("kstack=", kstack_setup);
+static int __init call_trace_setup(char *s)
+{
+ if (strcmp(s, "old") == 0)
+ call_trace = -1;
+ else if (strcmp(s, "both") == 0)
+ call_trace = 0;
+ else if (strcmp(s, "new") == 0)
+ call_trace = 1;
+ return 1;
+}
+__setup("call_trace=", call_trace_setup);
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index b81f473c4a1..7c4de31471d 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -6,7 +6,6 @@
#include <asm-generic/vmlinux.lds.h>
#include <asm/page.h>
-#include <linux/config.h>
#undef i386 /* in case the preprocessor is a 32bit one */
@@ -45,6 +44,15 @@ SECTIONS
RODATA
+#ifdef CONFIG_STACK_UNWIND
+ . = ALIGN(8);
+ .eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) {
+ __start_unwind = .;
+ *(.eh_frame)
+ __end_unwind = .;
+ }
+#endif
+
/* Data */
.data : AT(ADDR(.data) - LOAD_OFFSET) {
*(.data)
@@ -131,6 +139,26 @@ SECTIONS
*(.data.page_aligned)
}
+ /* might get freed after init */
+ . = ALIGN(4096);
+ __smp_alt_begin = .;
+ __smp_alt_instructions = .;
+ .smp_altinstructions : AT(ADDR(.smp_altinstructions) - LOAD_OFFSET) {
+ *(.smp_altinstructions)
+ }
+ __smp_alt_instructions_end = .;
+ . = ALIGN(8);
+ __smp_locks = .;
+ .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
+ *(.smp_locks)
+ }
+ __smp_locks_end = .;
+ .smp_altinstr_replacement : AT(ADDR(.smp_altinstr_replacement) - LOAD_OFFSET) {
+ *(.smp_altinstr_replacement)
+ }
+ . = ALIGN(4096);
+ __smp_alt_end = .;
+
. = ALIGN(4096); /* Init code and data */
__init_begin = .;
.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
diff --git a/arch/x86_64/kernel/vsyscall.c b/arch/x86_64/kernel/vsyscall.c
index 9468fb20b0b..f603037df16 100644
--- a/arch/x86_64/kernel/vsyscall.c
+++ b/arch/x86_64/kernel/vsyscall.c
@@ -107,7 +107,7 @@ static __always_inline long time_syscall(long *t)
int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
{
- if (unlikely(!__sysctl_vsyscall))
+ if (!__sysctl_vsyscall)
return gettimeofday(tv,tz);
if (tv)
do_vgettimeofday(tv);
@@ -120,7 +120,7 @@ int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
* unlikely */
time_t __vsyscall(1) vtime(time_t *t)
{
- if (unlikely(!__sysctl_vsyscall))
+ if (!__sysctl_vsyscall)
return time_syscall(t);
else if (t)
*t = __xtime.tv_sec;
diff --git a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c
index 1def21c9f7c..370952c4ff2 100644
--- a/arch/x86_64/kernel/x8664_ksyms.c
+++ b/arch/x86_64/kernel/x8664_ksyms.c
@@ -1,66 +1,21 @@
+/* Exports for assembly files.
+ All C exports should go in the respective C files. */
+
#include <linux/config.h>
#include <linux/module.h>
#include <linux/smp.h>
-#include <linux/user.h>
-#include <linux/sched.h>
-#include <linux/in6.h>
-#include <linux/interrupt.h>
-#include <linux/smp_lock.h>
-#include <linux/pm.h>
-#include <linux/pci.h>
-#include <linux/apm_bios.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/syscalls.h>
-#include <linux/tty.h>
#include <asm/semaphore.h>
#include <asm/processor.h>
-#include <asm/i387.h>
#include <asm/uaccess.h>
-#include <asm/checksum.h>
-#include <asm/io.h>
-#include <asm/delay.h>
-#include <asm/irq.h>
-#include <asm/mmx.h>
-#include <asm/desc.h>
#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
-#include <asm/nmi.h>
-#include <asm/kdebug.h>
-#include <asm/unistd.h>
-#include <asm/tlbflush.h>
-#include <asm/kdebug.h>
-
-extern spinlock_t rtc_lock;
-#ifdef CONFIG_SMP
-extern void __write_lock_failed(rwlock_t *rw);
-extern void __read_lock_failed(rwlock_t *rw);
-#endif
-
-/* platform dependent support */
-EXPORT_SYMBOL(boot_cpu_data);
-//EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL(__ioremap);
-EXPORT_SYMBOL(ioremap_nocache);
-EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(pm_idle);
-EXPORT_SYMBOL(pm_power_off);
EXPORT_SYMBOL(__down_failed);
EXPORT_SYMBOL(__down_failed_interruptible);
EXPORT_SYMBOL(__down_failed_trylock);
EXPORT_SYMBOL(__up_wakeup);
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
-EXPORT_SYMBOL(ip_compute_csum);
-/* Delay loops */
-EXPORT_SYMBOL(__udelay);
-EXPORT_SYMBOL(__ndelay);
-EXPORT_SYMBOL(__delay);
-EXPORT_SYMBOL(__const_udelay);
EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
@@ -71,42 +26,20 @@ EXPORT_SYMBOL(__put_user_2);
EXPORT_SYMBOL(__put_user_4);
EXPORT_SYMBOL(__put_user_8);
-EXPORT_SYMBOL(strncpy_from_user);
-EXPORT_SYMBOL(__strncpy_from_user);
-EXPORT_SYMBOL(clear_user);
-EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(copy_user_generic);
EXPORT_SYMBOL(copy_from_user);
EXPORT_SYMBOL(copy_to_user);
-EXPORT_SYMBOL(copy_in_user);
-EXPORT_SYMBOL(strnlen_user);
-
-#ifdef CONFIG_PCI
-EXPORT_SYMBOL(pci_mem_start);
-#endif
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(clear_page);
-EXPORT_SYMBOL(_cpu_pda);
#ifdef CONFIG_SMP
-EXPORT_SYMBOL(cpu_data);
+extern void FASTCALL(__write_lock_failed(rwlock_t *rw));
+extern void FASTCALL(__read_lock_failed(rwlock_t *rw));
EXPORT_SYMBOL(__write_lock_failed);
EXPORT_SYMBOL(__read_lock_failed);
-
-EXPORT_SYMBOL(smp_call_function);
-EXPORT_SYMBOL(cpu_callout_map);
-#endif
-
-#ifdef CONFIG_VT
-EXPORT_SYMBOL(screen_info);
#endif
-EXPORT_SYMBOL(rtc_lock);
-
-EXPORT_SYMBOL_GPL(set_nmi_callback);
-EXPORT_SYMBOL_GPL(unset_nmi_callback);
-
/* Export string functions. We normally rely on gcc builtins for most of these,
but gcc sometimes decides not to inline them. */
#undef memcpy
@@ -114,51 +47,14 @@ EXPORT_SYMBOL_GPL(unset_nmi_callback);
#undef memmove
extern void * memset(void *,int,__kernel_size_t);
-extern size_t strlen(const char *);
-extern void * memmove(void * dest,const void *src,size_t count);
extern void * memcpy(void *,const void *,__kernel_size_t);
extern void * __memcpy(void *,const void *,__kernel_size_t);
EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(__memcpy);
-#ifdef CONFIG_RWSEM_XCHGADD_ALGORITHM
-/* prototypes are wrong, these are assembly with custom calling functions */
-extern void rwsem_down_read_failed_thunk(void);
-extern void rwsem_wake_thunk(void);
-extern void rwsem_downgrade_thunk(void);
-extern void rwsem_down_write_failed_thunk(void);
-EXPORT_SYMBOL(rwsem_down_read_failed_thunk);
-EXPORT_SYMBOL(rwsem_wake_thunk);
-EXPORT_SYMBOL(rwsem_downgrade_thunk);
-EXPORT_SYMBOL(rwsem_down_write_failed_thunk);
-#endif
-
EXPORT_SYMBOL(empty_zero_page);
-
-EXPORT_SYMBOL(die_chain);
-
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(cpu_sibling_map);
-EXPORT_SYMBOL(smp_num_siblings);
-#endif
-
-#ifdef CONFIG_BUG
-EXPORT_SYMBOL(out_of_line_bug);
-#endif
-
EXPORT_SYMBOL(init_level4_pgt);
-
-extern unsigned long __supported_pte_mask;
-EXPORT_SYMBOL(__supported_pte_mask);
-
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(flush_tlb_page);
-#endif
-
-EXPORT_SYMBOL(cpu_khz);
-
EXPORT_SYMBOL(load_gs_index);
diff --git a/arch/x86_64/lib/csum-partial.c b/arch/x86_64/lib/csum-partial.c
index 5384e227cdf..c493735218d 100644
--- a/arch/x86_64/lib/csum-partial.c
+++ b/arch/x86_64/lib/csum-partial.c
@@ -147,4 +147,5 @@ unsigned short ip_compute_csum(unsigned char * buff, int len)
{
return csum_fold(csum_partial(buff,len,0));
}
+EXPORT_SYMBOL(ip_compute_csum);
diff --git a/arch/x86_64/lib/csum-wrappers.c b/arch/x86_64/lib/csum-wrappers.c
index 94323f20816..b1320ec5842 100644
--- a/arch/x86_64/lib/csum-wrappers.c
+++ b/arch/x86_64/lib/csum-wrappers.c
@@ -109,6 +109,7 @@ csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst, int len,
{
return csum_partial_copy_generic(src,dst,len,sum,NULL,NULL);
}
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
unsigned short csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
__u32 len, unsigned short proto, unsigned int sum)
diff --git a/arch/x86_64/lib/delay.c b/arch/x86_64/lib/delay.c
index 03c460cbdd1..b6cd3cca2f4 100644
--- a/arch/x86_64/lib/delay.c
+++ b/arch/x86_64/lib/delay.c
@@ -9,6 +9,7 @@
*/
#include <linux/config.h>
+#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <asm/delay.h>
@@ -36,18 +37,22 @@ void __delay(unsigned long loops)
}
while((now-bclock) < loops);
}
+EXPORT_SYMBOL(__delay);
inline void __const_udelay(unsigned long xloops)
{
__delay((xloops * HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32);
}
+EXPORT_SYMBOL(__const_udelay);
void __udelay(unsigned long usecs)
{
__const_udelay(usecs * 0x000010c6); /* 2**32 / 1000000 */
}
+EXPORT_SYMBOL(__udelay);
void __ndelay(unsigned long nsecs)
{
__const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
}
+EXPORT_SYMBOL(__ndelay);
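
The magic multipliers above are the same 32.32 fixed-point idea: __const_udelay() takes a fraction of a second scaled by 2^32, multiplies by HZ * loops_per_jiffy (delay loops per second) and shifts back down. Checking the constants:

    /*
     * 1 usec: 2^32 / 10^6 = 4294.967296 -> 0x10c6 = 4294
     * 1 nsec: 2^32 / 10^9 = 4.294967296 -> 0x5 (rounded up)
     */
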
diff --git a/arch/x86_64/lib/memmove.c b/arch/x86_64/lib/memmove.c
index e93d5255fdc..751ebae8ec4 100644
--- a/arch/x86_64/lib/memmove.c
+++ b/arch/x86_64/lib/memmove.c
@@ -3,12 +3,13 @@
*/
#define _STRING_C
#include <linux/string.h>
+#include <linux/module.h>
#undef memmove
void *memmove(void * dest,const void *src,size_t count)
{
if (dest < src) {
- __inline_memcpy(dest,src,count);
+ return memcpy(dest,src,count);
} else {
char *p = (char *) dest + count;
char *s = (char *) src + count;
@@ -17,3 +18,4 @@ void *memmove(void * dest,const void *src,size_t count)
}
return dest;
}
+EXPORT_SYMBOL(memmove);
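
The memmove() fix keeps the usual overlap rule: copy forward only when dest starts below src; otherwise walk backwards so every byte is read before it is overwritten. A hypothetical overlap the backward branch handles:

    char buf[8] = "abcdef";
    /* dest > src and the regions overlap: a forward copy would
     * clobber buf[2] and buf[3] before reading them */
    memmove(buf + 2, buf, 6);  /* buf now holds "ababcdef" (no NUL) */
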
diff --git a/arch/x86_64/lib/usercopy.c b/arch/x86_64/lib/usercopy.c
index 9bc2c295818..893d43f838c 100644
--- a/arch/x86_64/lib/usercopy.c
+++ b/arch/x86_64/lib/usercopy.c
@@ -5,6 +5,7 @@
* Copyright 1997 Linus Torvalds
* Copyright 2002 Andi Kleen <ak@suse.de>
*/
+#include <linux/module.h>
#include <asm/uaccess.h>
/*
@@ -47,15 +48,17 @@ __strncpy_from_user(char *dst, const char __user *src, long count)
__do_strncpy_from_user(dst, src, count, res);
return res;
}
+EXPORT_SYMBOL(__strncpy_from_user);
long
strncpy_from_user(char *dst, const char __user *src, long count)
{
long res = -EFAULT;
if (access_ok(VERIFY_READ, src, 1))
- __do_strncpy_from_user(dst, src, count, res);
+ return __strncpy_from_user(dst, src, count);
return res;
}
+EXPORT_SYMBOL(strncpy_from_user);
/*
* Zero Userspace
@@ -94,7 +97,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
[zero] "r" (0UL), [eight] "r" (8UL));
return size;
}
-
+EXPORT_SYMBOL(__clear_user);
unsigned long clear_user(void __user *to, unsigned long n)
{
@@ -102,6 +105,7 @@ unsigned long clear_user(void __user *to, unsigned long n)
return __clear_user(to, n);
return n;
}
+EXPORT_SYMBOL(clear_user);
/*
* Return the size of a string (including the ending 0)
@@ -125,6 +129,7 @@ long __strnlen_user(const char __user *s, long n)
s++;
}
}
+EXPORT_SYMBOL(__strnlen_user);
long strnlen_user(const char __user *s, long n)
{
@@ -132,6 +137,7 @@ long strnlen_user(const char __user *s, long n)
return 0;
return __strnlen_user(s, n);
}
+EXPORT_SYMBOL(strnlen_user);
long strlen_user(const char __user *s)
{
@@ -147,6 +153,7 @@ long strlen_user(const char __user *s)
s++;
}
}
+EXPORT_SYMBOL(strlen_user);
unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
{
@@ -155,3 +162,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from, unsigned le
}
return len;
}
+EXPORT_SYMBOL(copy_in_user);
+
diff --git a/arch/x86_64/mm/extable.c b/arch/x86_64/mm/extable.c
index 2d78f9fb403..79ac6e7100a 100644
--- a/arch/x86_64/mm/extable.c
+++ b/arch/x86_64/mm/extable.c
@@ -2,7 +2,6 @@
* linux/arch/x86_64/mm/extable.c
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 55250593d8c..5afcf6eb00f 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -5,7 +5,6 @@
* Copyright (C) 2001,2002 Andi Kleen, SuSE Labs.
*/
-#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -41,6 +40,41 @@
#define PF_RSVD (1<<3)
#define PF_INSTR (1<<4)
+#ifdef CONFIG_KPROBES
+ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
+
+/* Hook to register for page fault notifications */
+int register_page_fault_notifier(struct notifier_block *nb)
+{
+ vmalloc_sync_all();
+ return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
+}
+
+int unregister_page_fault_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
+}
+
+static inline int notify_page_fault(enum die_val val, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ struct die_args args = {
+ .regs = regs,
+ .str = str,
+ .err = err,
+ .trapnr = trap,
+ .signr = sig
+ };
+ return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+}
+#else
+static inline int notify_page_fault(enum die_val val, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ return NOTIFY_DONE;
+}
+#endif
+
void bust_spinlocks(int yes)
{
int loglevel_save = console_loglevel;
@@ -160,7 +194,7 @@ void dump_pagetable(unsigned long address)
printk("PGD %lx ", pgd_val(*pgd));
if (!pgd_present(*pgd)) goto ret;
- pud = __pud_offset_k((pud_t *)pgd_page(*pgd), address);
+ pud = pud_offset(pgd, address);
if (bad_address(pud)) goto bad;
printk("PUD %lx ", pud_val(*pud));
if (!pud_present(*pud)) goto ret;
@@ -348,7 +382,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
if (vmalloc_fault(address) >= 0)
return;
}
- if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
SIGSEGV) == NOTIFY_STOP)
return;
/*
@@ -358,7 +392,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
goto bad_area_nosemaphore;
}
- if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
SIGSEGV) == NOTIFY_STOP)
return;
@@ -383,7 +417,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
* kernel and should generate an OOPS. Unfortunately, in the case of an
- * erroneous fault occuring in a code path which already holds mmap_sem
+ * erroneous fault occurring in a code path which already holds mmap_sem
* we will deadlock attempting to validate the fault against the
* address space. Luckily the kernel only validly references user
* space from well defined areas of code, which are listed in the
@@ -410,8 +444,10 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if (error_code & 4) {
- // XXX: align red zone size with ABI
- if (address + 128 < regs->rsp)
+ /* Allow userspace just enough access below the stack pointer
+ * to let the 'enter' instruction work.
+ */
+ if (address + 65536 + 32 * sizeof(unsigned long) < regs->rsp)
goto bad_area;
}
if (expand_stack(vma, address))
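
The new constant budgets for the worst case of the 'enter' instruction the comment mentions: a 16-bit frame size plus up to 32 eight-byte frame-pointer pushes below the stack pointer:

    /* 65536 + 32 * sizeof(unsigned long) = 65536 + 256 = 65792 bytes */
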
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 4ba34e95d83..d14fb2dfbfc 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -6,7 +6,6 @@
* Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
*/
-#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -23,6 +22,7 @@
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
+#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
@@ -41,8 +41,6 @@
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
-#include <asm/dma-mapping.h>
-#include <asm/swiotlb.h>
#ifndef Dprintk
#define Dprintk(x...)
@@ -90,8 +88,6 @@ void show_mem(void)
printk(KERN_INFO "%lu pages swap cached\n",cached);
}
-/* References to section boundaries */
-
int after_bootmem;
static __init void *spp_getpage(void)
@@ -261,9 +257,10 @@ phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
for (i = 0; i < PTRS_PER_PMD; pmd++, i++, address += PMD_SIZE) {
unsigned long entry;
- if (address > end) {
- for (; i < PTRS_PER_PMD; i++, pmd++)
- set_pmd(pmd, __pmd(0));
+ if (address >= end) {
+ if (!after_bootmem)
+ for (; i < PTRS_PER_PMD; i++, pmd++)
+ set_pmd(pmd, __pmd(0));
break;
}
entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
@@ -341,7 +338,8 @@ static void __init find_early_table_space(unsigned long end)
table_end = table_start;
early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
- end, table_start << PAGE_SHIFT, table_end << PAGE_SHIFT);
+ end, table_start << PAGE_SHIFT,
+ (table_start << PAGE_SHIFT) + tables);
}
/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
@@ -372,7 +370,7 @@ void __meminit init_memory_mapping(unsigned long start, unsigned long end)
pud_t *pud;
if (after_bootmem)
- pud = pud_offset_k(pgd, start & PGDIR_MASK);
+ pud = pud_offset(pgd, start & PGDIR_MASK);
else
pud = alloc_low_page(&map, &pud_phys);
@@ -508,8 +506,6 @@ void __init clear_kernel_mapping(unsigned long address, unsigned long size)
/*
* Memory hotplug specific functions
*/
-#if defined(CONFIG_ACPI_HOTPLUG_MEMORY) || defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)
-
void online_page(struct page *page)
{
ClearPageReserved(page);
@@ -519,31 +515,17 @@ void online_page(struct page *page)
num_physpages++;
}
-#ifndef CONFIG_MEMORY_HOTPLUG
+#ifdef CONFIG_MEMORY_HOTPLUG
/*
- * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
- * just online the pages.
+ * XXX: memory_add_physaddr_to_nid() is supposed to look up the node id
+ * for a physical address. A hot-add notified via ACPI could find the
+ * node id by searching the DSDT, but the sysfs probe interface carries
+ * no node id at all, so return node 0 for now.
*/
-int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
+#ifdef CONFIG_NUMA
+int memory_add_physaddr_to_nid(u64 start)
{
- int err = -EIO;
- unsigned long pfn;
- unsigned long total = 0, mem = 0;
- for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
- if (pfn_valid(pfn)) {
- online_page(pfn_to_page(pfn));
- err = 0;
- mem++;
- }
- total++;
- }
- if (!err) {
- z->spanned_pages += total;
- z->present_pages += mem;
- z->zone_pgdat->node_spanned_pages += total;
- z->zone_pgdat->node_present_pages += mem;
- }
- return err;
+ return 0;
}
#endif
@@ -551,9 +533,9 @@ int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
* Memory is added always to NORMAL zone. This means you will never get
* additional DMA/DMA32 memory.
*/
-int add_memory(u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size)
{
- struct pglist_data *pgdat = NODE_DATA(0);
+ struct pglist_data *pgdat = NODE_DATA(nid);
struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
@@ -570,7 +552,7 @@ error:
printk("%s: Problem encountered in __add_pages!\n", __func__);
return ret;
}
-EXPORT_SYMBOL_GPL(add_memory);
+EXPORT_SYMBOL_GPL(arch_add_memory);
int remove_memory(u64 start, u64 size)
{
@@ -578,7 +560,33 @@ int remove_memory(u64 start, u64 size)
}
EXPORT_SYMBOL_GPL(remove_memory);
-#endif
+#else /* CONFIG_MEMORY_HOTPLUG */
+/*
+ * Memory hot-add without sparsemem: the mem_maps have been allocated
+ * in advance, so just online the pages.
+ */
+int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
+{
+ int err = -EIO;
+ unsigned long pfn;
+ unsigned long total = 0, mem = 0;
+ for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
+ if (pfn_valid(pfn)) {
+ online_page(pfn_to_page(pfn));
+ err = 0;
+ mem++;
+ }
+ total++;
+ }
+ if (!err) {
+ z->spanned_pages += total;
+ z->present_pages += mem;
+ z->zone_pgdat->node_spanned_pages += total;
+ z->zone_pgdat->node_present_pages += mem;
+ }
+ return err;
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
kcore_vsyscall;
@@ -587,10 +595,7 @@ void __init mem_init(void)
{
long codesize, reservedpages, datasize, initsize;
-#ifdef CONFIG_SWIOTLB
- pci_swiotlb_init();
-#endif
- no_iommu_init();
+ pci_iommu_alloc();
/* How many end-of-memory variables you have, grandma! */
max_low_pfn = end_pfn;
@@ -644,34 +649,44 @@ void __init mem_init(void)
#endif
}
-void free_initmem(void)
+void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
unsigned long addr;
- addr = (unsigned long)(&__init_begin);
- for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+ if (begin >= end)
+ return;
+
+ printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
+ for (addr = begin; addr < end; addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
init_page_count(virt_to_page(addr));
- memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
+ memset((void *)(addr & ~(PAGE_SIZE-1)),
+ POISON_FREE_INITMEM, PAGE_SIZE);
free_page(addr);
totalram_pages++;
}
- memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin);
- printk ("Freeing unused kernel memory: %luk freed\n", (__init_end - __init_begin) >> 10);
+}
+
+void free_initmem(void)
+{
+ memset(__initdata_begin, POISON_FREE_INITDATA,
+ __initdata_end - __initdata_begin);
+ free_init_pages("unused kernel memory",
+ (unsigned long)(&__init_begin),
+ (unsigned long)(&__init_end));
}
#ifdef CONFIG_DEBUG_RODATA
-extern char __start_rodata, __end_rodata;
void mark_rodata_ro(void)
{
- unsigned long addr = (unsigned long)&__start_rodata;
+ unsigned long addr = (unsigned long)__start_rodata;
- for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
+ for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);
printk ("Write protecting the kernel read-only data: %luk\n",
- (&__end_rodata - &__start_rodata) >> 10);
+ (__end_rodata - __start_rodata) >> 10);
/*
* change_page_attr_addr() requires a global_flush_tlb() call after it.
@@ -686,15 +701,7 @@ void mark_rodata_ro(void)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
- if (start >= end)
- return;
- printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- init_page_count(virt_to_page(start));
- free_page(start);
- totalram_pages++;
- }
+ free_init_pages("initrd memory", start, end);
}
#endif
diff --git a/arch/x86_64/mm/ioremap.c b/arch/x86_64/mm/ioremap.c
index ae207064201..45d7d823c3b 100644
--- a/arch/x86_64/mm/ioremap.c
+++ b/arch/x86_64/mm/ioremap.c
@@ -11,6 +11,7 @@
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
@@ -219,6 +220,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
}
return (__force void __iomem *) (offset + (char *)addr);
}
+EXPORT_SYMBOL(__ioremap);
/**
* ioremap_nocache - map bus memory into CPU space
@@ -246,6 +248,7 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
return __ioremap(phys_addr, size, _PAGE_PCD);
}
+EXPORT_SYMBOL(ioremap_nocache);
/**
* iounmap - Free a IO remapping
@@ -291,3 +294,5 @@ void iounmap(volatile void __iomem *addr)
BUG_ON(p != o || o == NULL);
kfree(p);
}
+EXPORT_SYMBOL(iounmap);
+
diff --git a/arch/x86_64/mm/mmap.c b/arch/x86_64/mm/mmap.c
index 43e9b99bdf2..80bba0dc000 100644
--- a/arch/x86_64/mm/mmap.c
+++ b/arch/x86_64/mm/mmap.c
@@ -1,7 +1,6 @@
/* Copyright 2005 Andi Kleen, SuSE Labs.
* Licensed under GPL, v.2
*/
-#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/random.h>
diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
index 531ad21447b..2685b1f3671 100644
--- a/arch/x86_64/mm/pageattr.c
+++ b/arch/x86_64/mm/pageattr.c
@@ -3,7 +3,6 @@
* Thanks to Ben LaHaise for precious feedback.
*/
-#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 474df22c6ed..502fce65e96 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -30,7 +30,6 @@
static struct acpi_table_slit *acpi_slit;
static nodemask_t nodes_parsed __initdata;
-static nodemask_t nodes_found __initdata;
static struct bootnode nodes[MAX_NUMNODES] __initdata;
static struct bootnode nodes_add[MAX_NUMNODES] __initdata;
static int found_add_area __initdata;
@@ -38,33 +37,14 @@ int hotadd_percent __initdata = 0;
#ifndef RESERVE_HOTADD
#define hotadd_percent 0 /* Ignore all settings */
#endif
-static u8 pxm2node[256] = { [0 ... 255] = 0xff };
/* Too small nodes confuse the VM badly. Usually they result
from BIOS bugs. */
#define NODE_MIN_SIZE (4*1024*1024)
-static int node_to_pxm(int n);
-
-int pxm_to_node(int pxm)
-{
- if ((unsigned)pxm >= 256)
- return -1;
- /* Extend 0xff to (int)-1 */
- return (signed char)pxm2node[pxm];
-}
-
static __init int setup_node(int pxm)
{
- unsigned node = pxm2node[pxm];
- if (node == 0xff) {
- if (nodes_weight(nodes_found) >= MAX_NUMNODES)
- return -1;
- node = first_unset_node(nodes_found);
- node_set(node, nodes_found);
- pxm2node[pxm] = node;
- }
- return pxm2node[pxm];
+ return acpi_map_pxm_to_node(pxm);
}
static __init int conflicting_nodes(unsigned long start, unsigned long end)
@@ -440,17 +420,6 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
return 0;
}
-static int node_to_pxm(int n)
-{
- int i;
- if (pxm2node[n] == n)
- return n;
- for (i = 0; i < 256; i++)
- if (pxm2node[i] == n)
- return i;
- return 0;
-}
-
void __init srat_reserve_add_area(int nodeid)
{
if (found_add_area && nodes_add[nodeid].end) {
diff --git a/arch/x86_64/pci/k8-bus.c b/arch/x86_64/pci/k8-bus.c
index 3acf60ded2a..b50a7c7c47f 100644
--- a/arch/x86_64/pci/k8-bus.c
+++ b/arch/x86_64/pci/k8-bus.c
@@ -2,6 +2,7 @@
#include <linux/pci.h>
#include <asm/mpspec.h>
#include <linux/cpumask.h>
+#include <asm/k8.h>
/*
* This discovers the pcibus <-> node mapping on AMD K8.
@@ -18,7 +19,6 @@
#define NR_LDT_BUS_NUMBER_REGISTERS 3
#define SECONDARY_LDT_BUS_NUMBER(dword) ((dword >> 8) & 0xFF)
#define SUBORDINATE_LDT_BUS_NUMBER(dword) ((dword >> 16) & 0xFF)
-#define PCI_DEVICE_ID_K8HTCONFIG 0x1100
/**
* fill_mp_bus_to_cpumask()
@@ -28,8 +28,7 @@
__init static int
fill_mp_bus_to_cpumask(void)
{
- struct pci_dev *nb_dev = NULL;
- int i, j;
+ int i, j, k;
u32 ldtbus, nid;
static int lbnr[3] = {
LDT_BUS_NUMBER_REGISTER_0,
@@ -37,8 +36,9 @@ fill_mp_bus_to_cpumask(void)
LDT_BUS_NUMBER_REGISTER_2
};
- while ((nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
- PCI_DEVICE_ID_K8HTCONFIG, nb_dev))) {
+ cache_k8_northbridges();
+ for (k = 0; k < num_k8_northbridges; k++) {
+ struct pci_dev *nb_dev = k8_northbridges[k];
pci_read_config_dword(nb_dev, NODE_ID_REGISTER, &nid);
for (i = 0; i < NR_LDT_BUS_NUMBER_REGISTERS; i++) {
diff --git a/arch/x86_64/pci/mmconfig.c b/arch/x86_64/pci/mmconfig.c
index a2060e4d5de..3c55c76c6fd 100644
--- a/arch/x86_64/pci/mmconfig.c
+++ b/arch/x86_64/pci/mmconfig.c
@@ -13,7 +13,10 @@
#include "pci.h"
-#define MMCONFIG_APER_SIZE (256*1024*1024)
+/* aperture is up to 256MB but BIOS may reserve less */
+#define MMCONFIG_APER_MIN (2 * 1024*1024)
+#define MMCONFIG_APER_MAX (256 * 1024*1024)
+
/* Verify the first 16 busses. We assume that systems with more busses
get MCFG right. */
#define MAX_CHECK_BUS 16
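
The MIN/MAX split follows from the MMCONFIG (ECAM) layout: 4 KB of config space per function, 8 functions per device and 32 devices per bus make 1 MB per bus, and 256 possible buses give the 256 MB MMCONFIG_APER_MAX. The 2 MB MMCONFIG_APER_MIN used in the e820 check below only insists that the first couple of buses are covered:

    /*
     * 4KB * 8 functions * 32 devices = 1MB per bus
     * 1MB * 256 buses = 256MB = MMCONFIG_APER_MAX
     * MMCONFIG_APER_MIN = 2MB ~ buses 0-1 reserved at minimum
     */
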
@@ -175,9 +178,10 @@ void __init pci_mmcfg_init(void)
return;
if (!e820_all_mapped(pci_mmcfg_config[0].base_address,
- pci_mmcfg_config[0].base_address + MMCONFIG_APER_SIZE,
+ pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN,
E820_RESERVED)) {
- printk(KERN_ERR "PCI: BIOS Bug: MCFG area is not E820-reserved\n");
+ printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n",
+ pci_mmcfg_config[0].base_address);
printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
return;
}
@@ -190,7 +194,8 @@ void __init pci_mmcfg_init(void)
}
for (i = 0; i < pci_mmcfg_config_num; ++i) {
pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i];
- pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].base_address, MMCONFIG_APER_SIZE);
+ pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].base_address,
+ MMCONFIG_APER_MAX);
if (!pci_mmcfg_virt[i].virt) {
printk("PCI: Cannot map mmconfig aperture for segment %d\n",
pci_mmcfg_config[i].pci_segment_group_number);