From 4db2ce0199f04b6e99999f22e28ef9a0ae5f0d2f Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Wed, 14 Sep 2005 21:47:01 -0700
Subject: [LIB]: Consolidate _atomic_dec_and_lock()

Several implementations were essentially a common piece of C code using
the cmpxchg() macro. Put the implementation in one spot that everyone
can share, and convert sparc64 over to using this.

Alpha is the lone arch-specific implementation, which codes up a
special fast path for the common case in order to avoid GP reloading
which a pure C version would require.

Signed-off-by: David S. Miller
---
 arch/x86_64/Kconfig | 5 -----
 arch/x86_64/kernel/x8664_ksyms.c | 4 ----
 arch/x86_64/lib/Makefile | 2 --
 arch/x86_64/lib/dec_and_lock.c | 40 ----------------------------------------
 4 files changed, 51 deletions(-)
 delete mode 100644 arch/x86_64/lib/dec_and_lock.c
(limited to 'arch/x86_64')

diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index e63323e03ea..0969d570f3b 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -277,11 +277,6 @@ source "mm/Kconfig"
 config HAVE_ARCH_EARLY_PFN_TO_NID
 	def_bool y
 
-config HAVE_DEC_LOCK
-	bool
-	depends on SMP
-	default y
-
 config NR_CPUS
 	int "Maximum number of CPUs (2-256)"
 	range 2 256
diff --git a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c
index 68ec03070e5..fd99ddd009b 100644
--- a/arch/x86_64/kernel/x8664_ksyms.c
+++ b/arch/x86_64/kernel/x8664_ksyms.c
@@ -178,10 +178,6 @@ EXPORT_SYMBOL(rwsem_down_write_failed_thunk);
 
 EXPORT_SYMBOL(empty_zero_page);
 
-#ifdef CONFIG_HAVE_DEC_LOCK
-EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif
-
 EXPORT_SYMBOL(die_chain);
 EXPORT_SYMBOL(register_die_notifier);
diff --git a/arch/x86_64/lib/Makefile b/arch/x86_64/lib/Makefile
index 6b26a1c1e9f..bba5db6cebd 100644
--- a/arch/x86_64/lib/Makefile
+++ b/arch/x86_64/lib/Makefile
@@ -10,5 +10,3 @@ lib-y := csum-partial.o csum-copy.o csum-wrappers.o delay.o \
 	usercopy.o getuser.o putuser.o \
 	thunk.o clear_page.o copy_page.o bitstr.o bitops.o
 lib-y += memcpy.o memmove.o memset.o copy_user.o
-
-lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
diff --git a/arch/x86_64/lib/dec_and_lock.c b/arch/x86_64/lib/dec_and_lock.c
deleted file mode 100644
index ab43394dc77..00000000000
--- a/arch/x86_64/lib/dec_and_lock.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * x86 version of "atomic_dec_and_lock()" using
- * the atomic "cmpxchg" instruction.
- *
- * (For CPU's lacking cmpxchg, we use the slow
- * generic version, and this one never even gets
- * compiled).
- */
-
-#include
-#include
-
-int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
-{
-	int counter;
-	int newcount;
-
-repeat:
-	counter = atomic_read(atomic);
-	newcount = counter-1;
-
-	if (!newcount)
-		goto slow_path;
-
-	asm volatile("lock; cmpxchgl %1,%2"
-		:"=a" (newcount)
-		:"r" (newcount), "m" (atomic->counter), "0" (counter));
-
-	/* If the above failed, "eax" will have changed */
-	if (newcount != counter)
-		goto repeat;
-	return 0;
-
-slow_path:
-	spin_lock(lock);
-	if (atomic_dec_and_test(atomic))
-		return 1;
-	spin_unlock(lock);
-	return 0;
-}
-- cgit v1.2.3-70-g09d2

From b9491ac835829e6a34e2bbaa8adad261c71bf990 Mon Sep 17 00:00:00 2001
From: Andrew Morton
Date: Fri, 16 Sep 2005 19:27:54 -0700
Subject: [PATCH] x86_64: e820.c needs module.h

For EXPORT_SYMBOL.
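For context, EXPORT_SYMBOL() is provided by <linux/module.h>, so any file that exports a symbol must include that header or the build fails. A minimal sketch of the pattern (the symbol name here is illustrative, not necessarily the one e820.c exports):

	#include <linux/module.h>	/* defines EXPORT_SYMBOL() */

	int example_symbol;		/* hypothetical variable to export */
	EXPORT_SYMBOL(example_symbol);	/* does not compile without module.h */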
Cc: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/e820.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/x86_64') diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c index 4e34b0f9d61..ab3f87aaff7 100644 --- a/arch/x86_64/kernel/e820.c +++ b/arch/x86_64/kernel/e820.c @@ -17,6 +17,8 @@ #include #include #include +#include + #include #include #include -- cgit v1.2.3-70-g09d2 From bc5e8fdfc622b03acf5ac974a1b8b26da6511c99 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sat, 17 Sep 2005 15:41:04 -0700 Subject: x86-64/smp: fix random SIGSEGV issues They seem to have been due to AMD errata 63/122; the fix is to disable TLB flush filtering in SMP configurations. Confirmed to fix the problem by Andrew Walrond [ Let's see if we'll have a better fix eventually, this is the Q&D "let's get this fixed and out there" version ] Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/setup.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'arch/x86_64') diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index 351d8d64c2f..238f73e1a83 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c @@ -831,11 +831,26 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c) #endif } +#define HWCR 0xc0010015 + static int __init init_amd(struct cpuinfo_x86 *c) { int r; int level; +#ifdef CONFIG_SMP + unsigned long value; + + // Disable TLB flush filter by setting HWCR.FFDIS: + // bit 6 of msr C001_0015 + // + // Errata 63 for SH-B3 steppings + // Errata 122 for all(?) steppings + rdmsrl(HWCR, value); + value |= 1 << 6; + wrmsrl(HWCR, value); +#endif + /* Bit 31 in normal CPUID used for nonstandard 3DNow ID; 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */ clear_bit(0*32+31, &c->x86_capability); -- cgit v1.2.3-70-g09d2 From 89d7cbf73e1a85241eb42339f6cb7a429fec178c Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 21 Sep 2005 09:55:44 -0700 Subject: [PATCH] update URL for HPET spec. Correct URL for HPET spec. Signed-off-by: Randy Dunlap Cc: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86_64/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86_64') diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig index 0969d570f3b..21afa69a086 100644 --- a/arch/x86_64/Kconfig +++ b/arch/x86_64/Kconfig @@ -308,7 +308,7 @@ config HPET_TIMER present. The HPET provides a stable time base on SMP systems, unlike the TSC, but it is more expensive to access, as it is off-chip. You can find the HPET spec at - . + . config X86_PM_TIMER bool "PM timer" -- cgit v1.2.3-70-g09d2 From 6c132b5fe6579ed4b4892c02fe6c05f7e3afc579 Mon Sep 17 00:00:00 2001 From: john stultz Date: Tue, 27 Sep 2005 21:45:36 -0700 Subject: [PATCH] x86-64: Fix bad assumption that dualcore cpus have synced TSCs This should resolve the issue seen in bugme bug #5105, where it is assumed that dualcore x86_64 systems have synced TSCs. This is not the case, and alternate timesources should be used instead. For more details, see: http://bugzilla.kernel.org/show_bug.cgi?id=5105 Andi's earlier concerns that the TSCs should be synced on dualcore systems have been resolved by confirmation from AMD folks that they can be unsynced. 
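To make the resulting policy concrete, this is roughly what unsynchronized_tsc() reduces to once the single-socket shortcut is removed (a simplified sketch of the function after the diff below, not a verbatim copy; the earlier OEM checks are omitted):

	static __init int unsynchronized_tsc(void)
	{
		/* Intel SMP systems are handled by the earlier checks and
		   treated as synchronized */
		if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
			return 0;
		/* Everything else, including single-socket dual-core AMD
		   parts, is assumed unsynchronized when more than one CPU
		   is online */
		return num_online_cpus() > 1;
	}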
Acked-by: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/time.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/x86_64') diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c index 2373cb8b862..703acde2a1a 100644 --- a/arch/x86_64/kernel/time.c +++ b/arch/x86_64/kernel/time.c @@ -959,9 +959,6 @@ static __init int unsynchronized_tsc(void) are handled in the OEM check above. */ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) return 0; - /* All in a single socket - should be synchronized */ - if (cpus_weight(cpu_core_map[0]) == num_online_cpus()) - return 0; #endif /* Assume multi socket systems are not synchronized */ return num_online_cpus() > 1; -- cgit v1.2.3-70-g09d2 From 7d318d774789657c37a5e994a4a2cf59d4879ae7 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Thu, 29 Sep 2005 22:05:55 +0200 Subject: [PATCH] Fix up TLB flush filter disabling I checked with AMD and they requested to only disable it for family 15. Also disable it for i386 too. And some style fixes. Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/i386/kernel/cpu/amd.c | 16 ++++++++++++++++ arch/x86_64/kernel/setup.c | 22 ++++++++++++---------- include/asm-x86_64/msr.h | 1 + 3 files changed, 29 insertions(+), 10 deletions(-) (limited to 'arch/x86_64') diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c index 73aeaf5a9d4..4c1ddf2b57c 100644 --- a/arch/i386/kernel/cpu/amd.c +++ b/arch/i386/kernel/cpu/amd.c @@ -28,6 +28,22 @@ static void __init init_amd(struct cpuinfo_x86 *c) int mbytes = num_physpages >> (20-PAGE_SHIFT); int r; +#ifdef CONFIG_SMP + unsigned long value; + + /* Disable TLB flush filter by setting HWCR.FFDIS on K8 + * bit 6 of msr C001_0015 + * + * Errata 63 for SH-B3 steppings + * Errata 122 for all steppings (F+ have it disabled by default) + */ + if (c->x86 == 15) { + rdmsrl(MSR_K7_HWCR, value); + value |= 1 << 6; + wrmsrl(MSR_K7_HWCR, value); + } +#endif + /* * FIXME: We should handle the K5 here. Set up the write * range and also turn on MSR 83 bits 4 and 31 (write alloc, diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index 238f73e1a83..257f5ba1790 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c @@ -831,8 +831,6 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c) #endif } -#define HWCR 0xc0010015 - static int __init init_amd(struct cpuinfo_x86 *c) { int r; @@ -841,14 +839,18 @@ static int __init init_amd(struct cpuinfo_x86 *c) #ifdef CONFIG_SMP unsigned long value; - // Disable TLB flush filter by setting HWCR.FFDIS: - // bit 6 of msr C001_0015 - // - // Errata 63 for SH-B3 steppings - // Errata 122 for all(?) 
steppings
-	rdmsrl(HWCR, value);
-	value |= 1 << 6;
-	wrmsrl(HWCR, value);
+	/*
+	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
+	 * bit 6 of msr C001_0015
+	 *
+	 * Errata 63 for SH-B3 steppings
+	 * Errata 122 for all steppings (F+ have it disabled by default)
+	 */
+	if (c->x86 == 15) {
+		rdmsrl(MSR_K8_HWCR, value);
+		value |= 1 << 6;
+		wrmsrl(MSR_K8_HWCR, value);
+	}
 #endif
 
 	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h
index 4d727f3f555..5a7fe3c6c3d 100644
--- a/include/asm-x86_64/msr.h
+++ b/include/asm-x86_64/msr.h
@@ -234,6 +234,7 @@ static inline unsigned int cpuid_edx(unsigned int op)
 #define MSR_K8_TOP_MEM1 0xC001001A
 #define MSR_K8_TOP_MEM2 0xC001001D
 #define MSR_K8_SYSCFG 0xC0010010
+#define MSR_K8_HWCR 0xC0010015
 
 /* K6 MSRs */
 #define MSR_K6_EFER 0xC0000080
-- cgit v1.2.3-70-g09d2

From 7644143cd6f7e029f3a8ea64f5fb0ab33ec39f72 Mon Sep 17 00:00:00 2001
From: Mike Waychison
Date: Fri, 30 Sep 2005 00:01:27 +0200
Subject: [PATCH] x86_64: Fix mce_log

The attempt to fix up the lockless mce log buffer introduced an infinite loop when trying to find a free entry.

And: using rcu_dereference() to load mcelog.next does not seem to be sufficient to ensure that mcelog.next is loaded each time around the loop in mce_log(). Instead, use an explicit rmb() to ensure that the compiler gets it right.

AK: turned the smp_wmbs into true wmbs to make sure they are not reordered by the compiler on UP.

Signed-off-by: Mike Waychison
Signed-off-by: Andi Kleen
Signed-off-by: Linus Torvalds
---
 arch/x86_64/kernel/mce.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)
(limited to 'arch/x86_64')

diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index 08203b07f4b..69541db5ff2 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -54,9 +54,12 @@ void mce_log(struct mce *mce)
 {
 	unsigned next, entry;
 	mce->finished = 0;
-	smp_wmb();
+	wmb();
 	for (;;) {
 		entry = rcu_dereference(mcelog.next);
+		/* The rmb forces the compiler to reload next in each
+		   iteration */
+		rmb();
 		for (;;) {
 			/* When the buffer fills up discard new entries. Assume
 			   that the earlier errors are the more interesting. */
@@ -69,6 +72,7 @@ void mce_log(struct mce *mce)
 				entry++;
 				continue;
 			}
+			break;
 		}
 		smp_rmb();
 		next = entry + 1;
@@ -76,9 +80,9 @@ void mce_log(struct mce *mce)
 			break;
 	}
 	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
-	smp_wmb();
+	wmb();
 	mcelog.entry[entry].finished = 1;
-	smp_wmb();
+	wmb();
 
 	if (!test_and_set_bit(0, &console_logged))
 		notify_user = 1;
-- cgit v1.2.3-70-g09d2

From 2dd960d66bc12b6b206e63104636514e5da0ddb7 Mon Sep 17 00:00:00 2001
From: "Zhang, Yanmin"
Date: Fri, 30 Sep 2005 11:59:20 -0700
Subject: [PATCH] utilization of kprobe_mutex is incorrect on x86_64

The up()/down() orders are incorrect in the arch/x86_64/kernel/kprobes.c file. kprobe_mutex is used to protect the free kprobe instruction slot list. arch_prepare_kprobe applies for a slot from the free list, and arch_remove_kprobe returns a slot to the free list. The incorrect up()/down() orders to operate on kprobe_mutex fail to protect the free list. If 2 threads try to get/return a kprobe instruction slot at the same time, the free slot list might be broken, or a free slot might be applied by 2 threads.
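The faulty pattern is easy to contrast outside the kernel: the lock must be taken before the critical section and released after, never the reverse. A hypothetical userspace analogue using POSIX threads (the list and slot names are stand-ins for the kprobe structures, not kernel APIs):

	#include <pthread.h>
	#include <stddef.h>

	static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
	static void *free_list;		/* stands in for the slot free list */

	void *get_slot(void)
	{
		void *slot;

		pthread_mutex_lock(&slot_lock);		/* down(): acquire first */
		slot = free_list;			/* critical section */
		if (slot)
			free_list = *(void **)slot;	/* pop the chained head */
		pthread_mutex_unlock(&slot_lock);	/* up(): release after */
		return slot;
	}

The broken code effectively released before the list operation and reacquired after it, leaving the list itself unprotected, exactly as the diff below corrects.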
Signed-off-by: Zhang Yanmin Cc: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/kprobes.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/x86_64') diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c index df08c43276a..76a28b007be 100644 --- a/arch/x86_64/kernel/kprobes.c +++ b/arch/x86_64/kernel/kprobes.c @@ -77,9 +77,9 @@ static inline int is_IF_modifier(kprobe_opcode_t *insn) int __kprobes arch_prepare_kprobe(struct kprobe *p) { /* insn: must be on special executable page on x86_64. */ - up(&kprobe_mutex); - p->ainsn.insn = get_insn_slot(); down(&kprobe_mutex); + p->ainsn.insn = get_insn_slot(); + up(&kprobe_mutex); if (!p->ainsn.insn) { return -ENOMEM; } @@ -231,9 +231,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p) { - up(&kprobe_mutex); - free_insn_slot(p->ainsn.insn); down(&kprobe_mutex); + free_insn_slot(p->ainsn.insn); + up(&kprobe_mutex); } static inline void save_previous_kprobe(void) -- cgit v1.2.3-70-g09d2 From e6a045a5b89037ae87c8c1bc84403f1d498e52a1 Mon Sep 17 00:00:00 2001 From: Ravikiran G Thirumalai Date: Fri, 30 Sep 2005 11:59:21 -0700 Subject: [PATCH] x86_64: fix the BP node_to_cpumask Fix the BP node_to_cpumask. 2.6.14-rc* broke the boot cpu bit as the cpu_to_node(0) is now not setup early enough for numa_init_array. cpu_to_node[] is setup much later at srat_detect_node on acpi srat based em64t machines. This seems like a problem on amd machines too, Tested on em64t though. /sys/devices/system/node/node0/cpumap shows up sanely after this patch. Signed off by: Ravikiran Thirumalai Signed-off-by: Shai Fultheim Cc: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86_64/mm/numa.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'arch/x86_64') diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c index 80a49d9bd8a..68ad7585351 100644 --- a/arch/x86_64/mm/numa.c +++ b/arch/x86_64/mm/numa.c @@ -178,7 +178,6 @@ void __init numa_init_array(void) rr++; } - set_bit(0, &node_to_cpumask[cpu_to_node(0)]); } #ifdef CONFIG_NUMA_EMU @@ -266,9 +265,7 @@ void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn) __cpuinit void numa_add_cpu(int cpu) { - /* BP is initialized elsewhere */ - if (cpu) - set_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]); + set_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]); } unsigned long __init numa_free_all_bootmem(void) -- cgit v1.2.3-70-g09d2 From 85cc5135ace4c8b75d7b4e1ea9fe15a7fcbd1516 Mon Sep 17 00:00:00 2001 From: Ravikiran G Thirumalai Date: Fri, 30 Sep 2005 11:59:22 -0700 Subject: [PATCH] x86_64 early numa init fix The tests Alok carried out on Petr's box confirmed that cpu_to_node[BP] is not setup early enough by numa_init_array due to the x86_64 changes in 2.6.14-rc*, and unfortunately set wrongly by the work around code in numa_init_array(). cpu_to_node[0] gets set with 1 early and later gets set properly to 0 during identify_cpu() when all cpus are brought up, but confusing the numa slab in the process. Here is a quick fix for this. The right fix obviously is to have cpu_to_node[bsp] setup early for numa_init_array(). The following patch will fix the problem now, and the code can stay on even when cpu_to_node{BP] gets fixed early correctly. Thanks to Petr for access to his box. 
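Restating the fixed loop from the diff below, with the assign-then-advance order made explicit: with nodes 0 and 1 online and all CPUs unassigned, CPUs 0, 1, 2 now get nodes 0, 1, 0, so the boot processor lands on node 0 as expected.

	rr = first_node(node_online_map);
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_to_node[i] != NUMA_NO_NODE)
			continue;
		cpu_to_node[i] = rr;			/* assign first ... */
		rr = next_node(rr, node_online_map);	/* ... then advance */
		if (rr == MAX_NUMNODES)
			rr = first_node(node_online_map);
	}

The old code advanced rr before ever using it (and then did a bare rr++ that could step outside the online-node map), which is how cpu_to_node[0] could come out as 1 on a two-node box.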
Signed off by: Ravikiran Thirumalai Signed-off-by: Alok N Kataria Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86_64/mm/numa.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'arch/x86_64') diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c index 68ad7585351..21480382100 100644 --- a/arch/x86_64/mm/numa.c +++ b/arch/x86_64/mm/numa.c @@ -167,15 +167,14 @@ void __init numa_init_array(void) mapping. To avoid this fill in the mapping for all possible CPUs, as the number of CPUs is not known yet. We round robin the existing nodes. */ - rr = 0; + rr = first_node(node_online_map); for (i = 0; i < NR_CPUS; i++) { if (cpu_to_node[i] != NUMA_NO_NODE) continue; + cpu_to_node[i] = rr; rr = next_node(rr, node_online_map); if (rr == MAX_NUMNODES) rr = first_node(node_online_map); - cpu_to_node[i] = rr; - rr++; } } -- cgit v1.2.3-70-g09d2 From ddea7be0ec8d1374f0b483a81566ed56ec9f3905 Mon Sep 17 00:00:00 2001 From: Ravikiran G Thirumalai Date: Mon, 3 Oct 2005 10:36:28 -0700 Subject: [PATCH] x86_64: Fix numa node topology detection for srat based x86_64 boxes 2.6.14-rc2 does not assign cpus to proper nodeids on our em64t numa boxen. Our boxes use acpi srat for parsing the numa information. srat_detect_node() used phys_proc_id[] to get to the cpu's local apic id, but phys_proc_id[] represents the cpu<->initial_apic_id mapping. The following patch fixes this problem. Now apicid_to_node[] is properly indexed with the local apic id. Signed-off-by: Ravikiran Thirumalai Acked-by: Suresh Siddha Cc: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/setup.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'arch/x86_64') diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index 257f5ba1790..cb28df14ff6 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c @@ -967,13 +967,12 @@ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c) static void srat_detect_node(void) { #ifdef CONFIG_NUMA - unsigned apicid, node; + unsigned node; int cpu = smp_processor_id(); /* Don't do the funky fallback heuristics the AMD version employs for now. */ - apicid = phys_proc_id[cpu]; - node = apicid_to_node[apicid]; + node = apicid_to_node[hard_smp_processor_id()]; if (node == NUMA_NO_NODE) node = 0; cpu_to_node[cpu] = node; -- cgit v1.2.3-70-g09d2 From 944d2647dded12e2b05ad8ebc020644bb1997ce1 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 5 Oct 2005 00:21:39 +0200 Subject: [PATCH] x86_64: Drop global bit from early low mappings Drop global bit from early low mappings Suggested by Linus, originally also proposed by Suresh. This fixes a race condition with early start of udev, originally tracked down by Suresh B. Siddha. The problem was that switching to the user space VM would not clear the global low mappings for the beginning of memory, which lead to memory corruption. Drop the global bits. The kernel mapping stays global because it should stay constant. Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/head.S | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) (limited to 'arch/x86_64') diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S index 4592bf21fca..b92e5f45ed4 100644 --- a/arch/x86_64/kernel/head.S +++ b/arch/x86_64/kernel/head.S @@ -270,26 +270,26 @@ ENTRY(level3_kernel_pgt) .org 0x4000 ENTRY(level2_ident_pgt) /* 40MB for bootup. 
*/ - .quad 0x0000000000000183 - .quad 0x0000000000200183 - .quad 0x0000000000400183 - .quad 0x0000000000600183 - .quad 0x0000000000800183 - .quad 0x0000000000A00183 - .quad 0x0000000000C00183 - .quad 0x0000000000E00183 - .quad 0x0000000001000183 - .quad 0x0000000001200183 - .quad 0x0000000001400183 - .quad 0x0000000001600183 - .quad 0x0000000001800183 - .quad 0x0000000001A00183 - .quad 0x0000000001C00183 - .quad 0x0000000001E00183 - .quad 0x0000000002000183 - .quad 0x0000000002200183 - .quad 0x0000000002400183 - .quad 0x0000000002600183 + .quad 0x0000000000000083 + .quad 0x0000000000200083 + .quad 0x0000000000400083 + .quad 0x0000000000600083 + .quad 0x0000000000800083 + .quad 0x0000000000A00083 + .quad 0x0000000000C00083 + .quad 0x0000000000E00083 + .quad 0x0000000001000083 + .quad 0x0000000001200083 + .quad 0x0000000001400083 + .quad 0x0000000001600083 + .quad 0x0000000001800083 + .quad 0x0000000001A00083 + .quad 0x0000000001C00083 + .quad 0x0000000001E00083 + .quad 0x0000000002000083 + .quad 0x0000000002200083 + .quad 0x0000000002400083 + .quad 0x0000000002600083 /* Temporary mappings for the super early allocator in arch/x86_64/mm/init.c */ .globl temp_boot_pmds temp_boot_pmds: -- cgit v1.2.3-70-g09d2 From 3dd083255ddcfa87751fa8e32f61a9547a15a541 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Sun, 9 Oct 2005 21:19:40 +0200 Subject: [PATCH] x86_64: Set up safe page tables during resume The following patch makes swsusp avoid the possible temporary corruption of page translation tables during resume on x86-64. This is achieved by creating a copy of the relevant page tables that will not be modified by swsusp and can be safely used by it on resume. The problem is that during resume on x86-64 swsusp may temporarily corrupt the page tables used for the direct mapping of RAM. If that happens, a page fault occurs and cannot be handled properly, which leads to the solid hang of the affected system. This leads to the loss of the system's state from before suspend and may result in the loss of data or the corruption of filesystems, so it is a serious issue. Also, it appears to happen quite often (for me, as often as 50% of the time). The problem is related to the fact that (at least) one of the PMD entries used in the direct memory mapping (starting at PAGE_OFFSET) points to a page table the physical address of which is much greater than the physical address of the PMD entry itself. Moreover, unfortunately, the physical address of the page table before suspend (i.e. the one stored in the suspend image) happens to be different to the physical address of the corresponding page table used during resume (i.e. the one that is valid right before swsusp_arch_resume() in arch/x86_64/kernel/suspend_asm.S is executed). Thus while the image is restored, the "offending" PMD entry gets overwritten, so it does not point to the right physical address any more (i.e. there's no page table at the address pointed to by it, because it points to the address the page table has been at during suspend). Consequently, if the PMD entry is used later on, and it _is_ used in the process of copying the image pages, a page fault occurs, but it cannot be handled in the normal way and the system hangs. In principle we can call create_resume_mapping() from swsusp_arch_resume() (ie. 
from suspend_asm.S), but then the memory allocations in create_resume_mapping(), resume_pud_mapping(), and resume_pmd_mapping() must be made carefully so that we use _only_ NosaveFree pages in them (the other pages are overwritten by the loop in swsusp_arch_resume()). Additionally, we are in atomic context at that time, so we cannot use GFP_KERNEL. Moreover, if one of the allocations fails, we should free all of the allocated pages, so we need to trace them somehow.

All of this is done in the appended patch, except that the functions populating the page tables are located in arch/x86_64/kernel/suspend.c rather than in init.c. It may be done in a more elegant way in the future, with the help of some swsusp patches that are in the works now.

[AK: move some externs into headers, renamed a function]

Signed-off-by: Rafael J. Wysocki
Signed-off-by: Andi Kleen
Signed-off-by: Linus Torvalds
---
 arch/x86_64/kernel/suspend.c | 127 +++++++++++++++++++++++++++++++++++++++
 arch/x86_64/kernel/suspend_asm.S | 17 ++++--
 include/linux/suspend.h | 2 +
 kernel/power/swsusp.c | 7 ++-
 4 files changed, 144 insertions(+), 9 deletions(-)
(limited to 'arch/x86_64')

diff --git a/arch/x86_64/kernel/suspend.c b/arch/x86_64/kernel/suspend.c
index ebb9abf3ce6..f066c6ab361 100644
--- a/arch/x86_64/kernel/suspend.c
+++ b/arch/x86_64/kernel/suspend.c
@@ -11,6 +11,8 @@
 #include
 #include
 #include
+#include
+#include
 
 struct saved_context saved_context;
 
@@ -140,4 +142,129 @@ void fix_processor_context(void)
 }
 
+#ifdef CONFIG_SOFTWARE_SUSPEND
+/* Defined in arch/x86_64/kernel/suspend_asm.S */
+extern int restore_image(void);
+
+pgd_t *temp_level4_pgt;
+
+static void **pages;
+
+static inline void *__add_page(void)
+{
+	void **c;
+
+	c = (void **)get_usable_page(GFP_ATOMIC);
+	if (c) {
+		*c = pages;
+		pages = c;
+	}
+	return c;
+}
+
+static inline void *__next_page(void)
+{
+	void **c;
+
+	c = pages;
+	if (c) {
+		pages = *c;
+		*c = NULL;
+	}
+	return c;
+}
+
+/*
+ * Try to allocate as many usable pages as needed and daisy chain them.
+ * If one allocation fails, free the pages allocated so far + */ +static int alloc_usable_pages(unsigned long n) +{ + void *p; + + pages = NULL; + do + if (!__add_page()) + break; + while (--n); + if (n) { + p = __next_page(); + while (p) { + free_page((unsigned long)p); + p = __next_page(); + } + return -ENOMEM; + } + return 0; +} + +static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end) +{ + long i, j; + + i = pud_index(address); + pud = pud + i; + for (; i < PTRS_PER_PUD; pud++, i++) { + unsigned long paddr; + pmd_t *pmd; + + paddr = address + i*PUD_SIZE; + if (paddr >= end) + break; + + pmd = (pmd_t *)__next_page(); + set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); + for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) { + unsigned long pe; + + if (paddr >= end) + break; + pe = _PAGE_NX | _PAGE_PSE | _KERNPG_TABLE | paddr; + pe &= __supported_pte_mask; + set_pmd(pmd, __pmd(pe)); + } + } +} + +static void set_up_temporary_mappings(void) +{ + unsigned long start, end, next; + + temp_level4_pgt = (pgd_t *)__next_page(); + + /* It is safe to reuse the original kernel mapping */ + set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map), + init_level4_pgt[pgd_index(__START_KERNEL_map)]); + + /* Set up the direct mapping from scratch */ + start = (unsigned long)pfn_to_kaddr(0); + end = (unsigned long)pfn_to_kaddr(end_pfn); + + for (; start < end; start = next) { + pud_t *pud = (pud_t *)__next_page(); + next = start + PGDIR_SIZE; + if (next > end) + next = end; + res_phys_pud_init(pud, __pa(start), __pa(next)); + set_pgd(temp_level4_pgt + pgd_index(start), + mk_kernel_pgd(__pa(pud))); + } +} + +int swsusp_arch_resume(void) +{ + unsigned long n; + + n = ((end_pfn << PAGE_SHIFT) + PUD_SIZE - 1) >> PUD_SHIFT; + n += (n + PTRS_PER_PUD - 1) / PTRS_PER_PUD + 1; + pr_debug("swsusp_arch_resume(): pages needed = %lu\n", n); + if (alloc_usable_pages(n)) { + free_eaten_memory(); + return -ENOMEM; + } + /* We have got enough memory and from now on we cannot recover */ + set_up_temporary_mappings(); + restore_image(); + return 0; +} +#endif /* CONFIG_SOFTWARE_SUSPEND */ diff --git a/arch/x86_64/kernel/suspend_asm.S b/arch/x86_64/kernel/suspend_asm.S index 4d659e97df1..320b6fb00cc 100644 --- a/arch/x86_64/kernel/suspend_asm.S +++ b/arch/x86_64/kernel/suspend_asm.S @@ -39,12 +39,13 @@ ENTRY(swsusp_arch_suspend) call swsusp_save ret -ENTRY(swsusp_arch_resume) - /* set up cr3 */ - leaq init_level4_pgt(%rip),%rax - subq $__START_KERNEL_map,%rax - movq %rax,%cr3 - +ENTRY(restore_image) + /* switch to temporary page tables */ + movq $__PAGE_OFFSET, %rdx + movq temp_level4_pgt(%rip), %rax + subq %rdx, %rax + movq %rax, %cr3 + /* Flush TLB */ movq mmu_cr4_features(%rip), %rax movq %rax, %rdx andq $~(1<<7), %rdx # PGE @@ -69,6 +70,10 @@ loop: movq pbe_next(%rdx), %rdx jmp loop done: + /* go back to the original page tables */ + leaq init_level4_pgt(%rip), %rax + subq $__START_KERNEL_map, %rax + movq %rax, %cr3 /* Flush TLB, including "global" things (vmalloc) */ movq mmu_cr4_features(%rip), %rax movq %rax, %rdx diff --git a/include/linux/suspend.h b/include/linux/suspend.h index f2e96fdfaae..ad15a54806d 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -71,5 +71,7 @@ void restore_processor_state(void); struct saved_context; void __save_processor_state(struct saved_context *ctxt); void __restore_processor_state(struct saved_context *ctxt); +extern unsigned long get_usable_page(unsigned gfp_mask); +extern void free_eaten_memory(void); #endif /* _LINUX_SWSUSP_H 
*/ diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c index acf79ac1cb6..2d5c4567644 100644 --- a/kernel/power/swsusp.c +++ b/kernel/power/swsusp.c @@ -1095,7 +1095,7 @@ static inline void eat_page(void *page) *eaten_memory = c; } -static unsigned long get_usable_page(unsigned gfp_mask) +unsigned long get_usable_page(unsigned gfp_mask) { unsigned long m; @@ -1109,7 +1109,7 @@ static unsigned long get_usable_page(unsigned gfp_mask) return m; } -static void free_eaten_memory(void) +void free_eaten_memory(void) { unsigned long m; void **c; @@ -1481,11 +1481,12 @@ static int read_suspend_image(void) /* Allocate memory for the image and read the data from swap */ error = check_pagedir(pagedir_nosave); - free_eaten_memory(); + if (!error) error = data_read(pagedir_nosave); if (error) { /* We fail cleanly */ + free_eaten_memory(); for_each_pbe (p, pagedir_nosave) if (p->address) { free_page(p->address); -- cgit v1.2.3-70-g09d2 From d347f372273c2b3d86a66e2e1c94c790c208e166 Mon Sep 17 00:00:00 2001 From: "Markus F.X.J. Oberhumer" Date: Sun, 9 Oct 2005 18:54:23 +0200 Subject: [PATCH] i386: fix stack alignment for signal handlers This fixes the setup of the alignment of the signal frame, so that all signal handlers are run with a properly aligned stack frame. The current code "over-aligns" the stack pointer so that the stack frame is effectively always mis-aligned by 4 bytes. But what we really want is that on function entry ((sp + 4) & 15) == 0, which matches what would happen if the stack were aligned before a "call" instruction. Signed-off-by: Markus F.X.J. Oberhumer Signed-off-by: Linus Torvalds --- arch/i386/kernel/signal.c | 6 +++++- arch/x86_64/ia32/ia32_signal.c | 6 +++++- 2 files changed, 10 insertions(+), 2 deletions(-) (limited to 'arch/x86_64') diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c index 61eb0c8a6e4..adcd069db91 100644 --- a/arch/i386/kernel/signal.c +++ b/arch/i386/kernel/signal.c @@ -338,7 +338,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) esp = (unsigned long) ka->sa.sa_restorer; } - return (void __user *)((esp - frame_size) & -8ul); + esp -= frame_size; + /* Align the stack pointer according to the i386 ABI, + * i.e. so that on function entry ((sp + 4) & 15) == 0. */ + esp = ((esp + 4) & -16ul) - 4; + return (void __user *) esp; } /* These symbols are defined with the addresses in the vsyscall page. diff --git a/arch/x86_64/ia32/ia32_signal.c b/arch/x86_64/ia32/ia32_signal.c index 66e2821533d..0903cc1faef 100644 --- a/arch/x86_64/ia32/ia32_signal.c +++ b/arch/x86_64/ia32/ia32_signal.c @@ -425,7 +425,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) rsp = (unsigned long) ka->sa.sa_restorer; } - return (void __user *)((rsp - frame_size) & -8UL); + rsp -= frame_size; + /* Align the stack pointer according to the i386 ABI, + * i.e. so that on function entry ((sp + 4) & 15) == 0. */ + rsp = ((rsp + 4) & -16ul) - 4; + return (void __user *) rsp; } int ia32_setup_frame(int sig, struct k_sigaction *ka, -- cgit v1.2.3-70-g09d2 From 094804c5a132f04c12dd4902ee15c64362e5c1af Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Tue, 11 Oct 2005 01:03:39 +0200 Subject: [PATCH] x86_64: Fix change_page_attr cache flushing Noticed by Terence Ripperda Undo wrong change in global_flush_tlb. We need to flush the caches in all cases, not just when pages were reverted. This was a bogus optimization added earlier, but it was wrong. 
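Spelled out, the intended control flow after the revert looks like this (a simplified sketch of global_flush_tlb() with the removed early return marked, not a full copy of the function):

	down_read(&init_mm.mmap_sem);
	df = xchg(&df_list, NULL);
	up_read(&init_mm.mmap_sem);
	/* no "if (!df) return;" here: the flush below must run even
	   when the deferred list is empty */
	flush_map((df && !df->next) ? df->address : 0);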
Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/mm/pageattr.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/x86_64') diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c index 94862e1ec03..b90e8fe9eeb 100644 --- a/arch/x86_64/mm/pageattr.c +++ b/arch/x86_64/mm/pageattr.c @@ -220,8 +220,6 @@ void global_flush_tlb(void) down_read(&init_mm.mmap_sem); df = xchg(&df_list, NULL); up_read(&init_mm.mmap_sem); - if (!df) - return; flush_map((df && !df->next) ? df->address : 0); for (; df; df = next_df) { next_df = df->next; -- cgit v1.2.3-70-g09d2 From 421c7ce6d001fce28b1fa8fdd2e7ded0ed8a0ad5 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 10 Oct 2005 22:32:45 +0200 Subject: [PATCH] x86_64: Allocate cpu local data for all possible CPUs CPU hotplug fills up the possible map to NR_CPUs, but it did that after setting up per CPU data. This lead to CPU data not getting allocated for all possible CPUs, which lead to various side effects. Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/setup64.c | 4 ++++ arch/x86_64/kernel/smpboot.c | 6 +----- include/asm-x86_64/smp.h | 1 + 3 files changed, 6 insertions(+), 5 deletions(-) (limited to 'arch/x86_64') diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c index bd33be24a38..79190891fbc 100644 --- a/arch/x86_64/kernel/setup64.c +++ b/arch/x86_64/kernel/setup64.c @@ -87,6 +87,10 @@ void __init setup_per_cpu_areas(void) int i; unsigned long size; +#ifdef CONFIG_HOTPLUG_CPU + prefill_possible_map(); +#endif + /* Copy section for each CPU (we discard the original) */ size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES); #ifdef CONFIG_MODULES diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c index e12d7baeb33..658a81b33f3 100644 --- a/arch/x86_64/kernel/smpboot.c +++ b/arch/x86_64/kernel/smpboot.c @@ -892,7 +892,7 @@ static __init void disable_smp(void) * those NR_CPUS, hence cpu_possible_map represents entire NR_CPUS range. * - Ashok Raj */ -static void prefill_possible_map(void) +__init void prefill_possible_map(void) { int i; for (i = 0; i < NR_CPUS; i++) @@ -967,10 +967,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus) current_cpu_data = boot_cpu_data; current_thread_info()->cpu = 0; /* needed? */ -#ifdef CONFIG_HOTPLUG_CPU - prefill_possible_map(); -#endif - if (smp_sanity_check(max_cpus) < 0) { printk(KERN_INFO "SMP disabled\n"); disable_smp(); diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h index 24e32611f0b..c57ce407134 100644 --- a/include/asm-x86_64/smp.h +++ b/include/asm-x86_64/smp.h @@ -81,6 +81,7 @@ static inline int hard_smp_processor_id(void) extern int safe_smp_processor_id(void); extern int __cpu_disable(void); extern void __cpu_die(unsigned int cpu); +extern void prefill_possible_map(void); #endif /* !ASSEMBLY */ -- cgit v1.2.3-70-g09d2
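The fix in this last patch is purely one of ordering; a simplified sketch of the intended call sequence is below (function bodies elided; allocate_one_percpu_area() is a hypothetical stand-in for the per-CPU copy loop in setup_per_cpu_areas()):

	void __init setup_per_cpu_areas(void)
	{
		int i;

	#ifdef CONFIG_HOTPLUG_CPU
		prefill_possible_map();	/* populate cpu_possible_map first */
	#endif
		/* only now size and allocate per-CPU data, so every possible
		   CPU gets a copy, not just the CPUs online at boot */
		for (i = 0; i < NR_CPUS; i++)
			if (cpu_possible(i))
				allocate_one_percpu_area(i);	/* hypothetical */
	}

Previously prefill_possible_map() ran later, from smp_prepare_cpus(), after the per-CPU areas had already been sized, which is why per-CPU data was missing for hotpluggable CPUs.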