author | Jeff Garzik <jgarzik@pobox.com> | 2006-02-07 01:47:12 -0500
committer | Jeff Garzik <jgarzik@pobox.com> | 2006-02-07 01:47:12 -0500
commit | 3c9b3a8575b4f2551e3b5b74ffa1c3559c6338eb (patch)
tree | 7f8d84353852401ec74e005f6f0b1eb958b9a70d /arch/i386/kernel
parent | c0d3c0c0ce94d3db893577ae98e64414d92e49d8 (diff)
parent | c03296a868ae7c91aa2d8b372184763b18f16d7a (diff)
Merge branch 'master'
Diffstat (limited to 'arch/i386/kernel')
24 files changed, 224 insertions, 177 deletions
diff --git a/arch/i386/kernel/acpi/Makefile b/arch/i386/kernel/acpi/Makefile
index 267ca48e1b6..d51c7313cae 100644
--- a/arch/i386/kernel/acpi/Makefile
+++ b/arch/i386/kernel/acpi/Makefile
@@ -3,6 +3,6 @@ obj-$(CONFIG_X86_IO_APIC) += earlyquirk.o
 obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o
 ifneq ($(CONFIG_ACPI_PROCESSOR),)
-obj-y += cstate.o
+obj-y += cstate.o processor.o
 endif
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index 2111529dea7..79577f0ace9 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -248,10 +248,17 @@ acpi_parse_lapic(acpi_table_entry_header * header, const unsigned long end)
 acpi_table_print_madt_entry(header);
- /* Register even disabled CPUs for cpu hotplug */
-
- x86_acpiid_to_apicid[processor->acpi_id] = processor->id;
+ /* Record local apic id only when enabled */
+ if (processor->flags.enabled)
+ x86_acpiid_to_apicid[processor->acpi_id] = processor->id;
+ /*
+ * We need to register disabled CPU as well to permit
+ * counting disabled CPUs. This allows us to size
+ * cpus_possible_map more accurately, to permit
+ * to not preallocating memory for all NR_CPUS
+ * when we use CPU hotplug.
+ */
 mp_register_lapic(processor->id, /* APIC ID */
 processor->flags.enabled); /* Enabled? */
@@ -464,7 +471,7 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
 * success: return IRQ number (>=0)
 * failure: return < 0
 */
-int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
+int acpi_register_gsi(u32 gsi, int triggering, int polarity)
 {
 unsigned int irq;
 unsigned int plat_gsi = gsi;
@@ -476,14 +483,14 @@ int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
 if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
 extern void eisa_set_level_irq(unsigned int irq);
- if (edge_level == ACPI_LEVEL_SENSITIVE)
+ if (triggering == ACPI_LEVEL_SENSITIVE)
 eisa_set_level_irq(gsi);
 }
 #endif
 #ifdef CONFIG_X86_IO_APIC
 if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
- plat_gsi = mp_register_gsi(gsi, edge_level, active_high_low);
+ plat_gsi = mp_register_gsi(gsi, triggering, polarity);
 }
 #endif
 acpi_gsi_to_irq(plat_gsi, &irq);
diff --git a/arch/i386/kernel/acpi/cstate.c b/arch/i386/kernel/acpi/cstate.c
index 4c3036ba65d..25db49ef177 100644
--- a/arch/i386/kernel/acpi/cstate.c
+++ b/arch/i386/kernel/acpi/cstate.c
@@ -14,64 +14,6 @@
 #include <acpi/processor.h>
 #include <asm/acpi.h>
-static void acpi_processor_power_init_intel_pdc(struct acpi_processor_power
- *pow)
-{
- struct acpi_object_list *obj_list;
- union acpi_object *obj;
- u32 *buf;
-
- /* allocate and initialize pdc. It will be used later. */
- obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
- if (!obj_list) {
- printk(KERN_ERR "Memory allocation error\n");
- return;
- }
-
- obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
- if (!obj) {
- printk(KERN_ERR "Memory allocation error\n");
- kfree(obj_list);
- return;
- }
-
- buf = kmalloc(12, GFP_KERNEL);
- if (!buf) {
- printk(KERN_ERR "Memory allocation error\n");
- kfree(obj);
- kfree(obj_list);
- return;
- }
-
- buf[0] = ACPI_PDC_REVISION_ID;
- buf[1] = 1;
- buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
-
- obj->type = ACPI_TYPE_BUFFER;
- obj->buffer.length = 12;
- obj->buffer.pointer = (u8 *) buf;
- obj_list->count = 1;
- obj_list->pointer = obj;
- pow->pdc = obj_list;
-
- return;
-}
-
-/* Initialize _PDC data based on the CPU vendor */
-void acpi_processor_power_init_pdc(struct acpi_processor_power *pow,
- unsigned int cpu)
-{
- struct cpuinfo_x86 *c = cpu_data + cpu;
-
- pow->pdc = NULL;
- if (c->x86_vendor == X86_VENDOR_INTEL)
- acpi_processor_power_init_intel_pdc(pow);
-
- return;
-}
-
-EXPORT_SYMBOL(acpi_processor_power_init_pdc);
-
 /*
 * Initialize bm_flags based on the CPU cache properties
 * On SMP it depends on cache configuration
diff --git a/arch/i386/kernel/acpi/processor.c b/arch/i386/kernel/acpi/processor.c
new file mode 100644
index 00000000000..9f4cc02717e
--- /dev/null
+++ b/arch/i386/kernel/acpi/processor.c
@@ -0,0 +1,75 @@
+/*
+ * arch/i386/kernel/acpi/processor.c
+ *
+ * Copyright (C) 2005 Intel Corporation
+ * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ * - Added _PDC for platforms with Intel CPUs
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+
+#include <acpi/processor.h>
+#include <asm/acpi.h>
+
+static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
+{
+ struct acpi_object_list *obj_list;
+ union acpi_object *obj;
+ u32 *buf;
+
+ /* allocate and initialize pdc. It will be used later. */
+ obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
+ if (!obj_list) {
+ printk(KERN_ERR "Memory allocation error\n");
+ return;
+ }
+
+ obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
+ if (!obj) {
+ printk(KERN_ERR "Memory allocation error\n");
+ kfree(obj_list);
+ return;
+ }
+
+ buf = kmalloc(12, GFP_KERNEL);
+ if (!buf) {
+ printk(KERN_ERR "Memory allocation error\n");
+ kfree(obj);
+ kfree(obj_list);
+ return;
+ }
+
+ buf[0] = ACPI_PDC_REVISION_ID;
+ buf[1] = 1;
+ buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
+
+ if (cpu_has(c, X86_FEATURE_EST))
+ buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
+
+ obj->type = ACPI_TYPE_BUFFER;
+ obj->buffer.length = 12;
+ obj->buffer.pointer = (u8 *) buf;
+ obj_list->count = 1;
+ obj_list->pointer = obj;
+ pr->pdc = obj_list;
+
+ return;
+}
+
+/* Initialize _PDC data based on the CPU vendor */
+void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
+{
+ unsigned int cpu = pr->id;
+ struct cpuinfo_x86 *c = cpu_data + cpu;
+
+ pr->pdc = NULL;
+ if (c->x86_vendor == X86_VENDOR_INTEL)
+ init_intel_pdc(pr, c);
+
+ return;
+}
+
+EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index acd3f1e34ca..98a5c23cf3d 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -75,8 +75,10 @@ void ack_bad_irq(unsigned int irq)
 * holds up an irq slot - in excessive cases (when multiple
 * unexpected vectors occur) that might lock up the APIC
 * completely.
+ * But only ack when the APIC is enabled -AK
 */
- ack_APIC_irq();
+ if (!cpu_has_apic)
+ ack_APIC_irq();
 }
 void __init apic_intr_init(void)
@@ -1303,6 +1305,7 @@ int __init APIC_init_uniprocessor (void)
 if (!cpu_has_apic && APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
 printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
 boot_cpu_physical_apicid);
+ clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
 return -1;
 }
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index 333578a4e91..0810f81f2a0 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -282,3 +282,11 @@ int __init amd_init_cpu(void)
 }
 //early_arch_initcall(amd_init_cpu);
+
+static int __init amd_exit_cpu(void)
+{
+ cpu_devs[X86_VENDOR_AMD] = NULL;
+ return 0;
+}
+
+late_initcall(amd_exit_cpu);
diff --git a/arch/i386/kernel/cpu/centaur.c b/arch/i386/kernel/cpu/centaur.c
index 394814e5767..f52669ecb93 100644
--- a/arch/i386/kernel/cpu/centaur.c
+++ b/arch/i386/kernel/cpu/centaur.c
@@ -405,10 +405,6 @@ static void __init init_centaur(struct cpuinfo_x86 *c)
 winchip2_protect_mcr();
 #endif
 break;
- case 10:
- name="4";
- /* no info on the WC4 yet */
- break;
 default:
 name="??";
 }
@@ -474,3 +470,11 @@ int __init centaur_init_cpu(void)
 }
 //early_arch_initcall(centaur_init_cpu);
+
+static int __init centaur_exit_cpu(void)
+{
+ cpu_devs[X86_VENDOR_CENTAUR] = NULL;
+ return 0;
+}
+
+late_initcall(centaur_exit_cpu);
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 15aee26ec2b..7eb9213734a 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -44,6 +44,7 @@ static void default_init(struct cpuinfo_x86 * c)
 static struct cpu_dev default_cpu = {
 .c_init = default_init,
+ .c_vendor = "Unknown",
 };
 static struct cpu_dev * this_cpu = &default_cpu;
@@ -150,6 +151,7 @@ static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
 {
 char *v = c->x86_vendor_id;
 int i;
+ static int printed;
 for (i = 0; i < X86_VENDOR_NUM; i++) {
 if (cpu_devs[i]) {
@@ -159,10 +161,17 @@ static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
 c->x86_vendor = i;
 if (!early)
 this_cpu = cpu_devs[i];
- break;
+ return;
 }
 }
 }
+ if (!printed) {
+ printed++;
+ printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
+ printk(KERN_ERR "CPU: Your system may be unstable.\n");
+ }
+ c->x86_vendor = X86_VENDOR_UNKNOWN;
+ this_cpu = &default_cpu;
 }
diff --git a/arch/i386/kernel/cpu/cpufreq/Kconfig b/arch/i386/kernel/cpu/cpufreq/Kconfig
index 0f1eb507233..26892d2099b 100644
--- a/arch/i386/kernel/cpu/cpufreq/Kconfig
+++ b/arch/i386/kernel/cpu/cpufreq/Kconfig
@@ -96,6 +96,7 @@ config X86_POWERNOW_K8_ACPI
 config X86_GX_SUSPMOD
 tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
+ depends on PCI
 help
 This add the CPUFreq driver for NatSemi Geode processors which
 support suspend modulation.
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 7975e79d5fa..3852d0a4c1b 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -295,68 +295,6 @@ acpi_cpufreq_guess_freq (
 }
-/*
- * acpi_processor_cpu_init_pdc_est - let BIOS know about the SMP capabilities
- * of this driver
- * @perf: processor-specific acpi_io_data struct
- * @cpu: CPU being initialized
- *
- * To avoid issues with legacy OSes, some BIOSes require to be informed of
- * the SMP capabilities of OS P-state driver. Here we set the bits in _PDC
- * accordingly, for Enhanced Speedstep. Actual call to _PDC is done in
- driver/acpi/processor.c
- */
-static void
-acpi_processor_cpu_init_pdc_est(
- struct acpi_processor_performance *perf,
- unsigned int cpu,
- struct acpi_object_list *obj_list
- )
-{
- union acpi_object *obj;
- u32 *buf;
- struct cpuinfo_x86 *c = cpu_data + cpu;
- dprintk("acpi_processor_cpu_init_pdc_est\n");
-
- if (!cpu_has(c, X86_FEATURE_EST))
- return;
-
- /* Initialize pdc. It will be used later. */
- if (!obj_list)
- return;
-
- if (!(obj_list->count && obj_list->pointer))
- return;
-
- obj = obj_list->pointer;
- if ((obj->buffer.length == 12) && obj->buffer.pointer) {
- buf = (u32 *)obj->buffer.pointer;
- buf[0] = ACPI_PDC_REVISION_ID;
- buf[1] = 1;
- buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
- perf->pdc = obj_list;
- }
- return;
-}
-
-
-/* CPU specific PDC initialization */
-static void
-acpi_processor_cpu_init_pdc(
- struct acpi_processor_performance *perf,
- unsigned int cpu,
- struct acpi_object_list *obj_list
- )
-{
- struct cpuinfo_x86 *c = cpu_data + cpu;
- dprintk("acpi_processor_cpu_init_pdc\n");
- perf->pdc = NULL;
- if (cpu_has(c, X86_FEATURE_EST))
- acpi_processor_cpu_init_pdc_est(perf, cpu, obj_list);
- return;
-}
-
-
 static int
 acpi_cpufreq_cpu_init (
 struct cpufreq_policy *policy)
@@ -367,14 +305,7 @@ acpi_cpufreq_cpu_init (
 unsigned int result = 0;
 struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
- union acpi_object arg0 = {ACPI_TYPE_BUFFER};
- u32 arg0_buf[3];
- struct acpi_object_list arg_list = {1, &arg0};
-
 dprintk("acpi_cpufreq_cpu_init\n");
- /* setup arg_list for _PDC settings */
- arg0.buffer.length = 12;
- arg0.buffer.pointer = (u8 *) arg0_buf;
 data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
 if (!data)
@@ -382,9 +313,7 @@ acpi_cpufreq_cpu_init (
 acpi_io_data[cpu] = data;
- acpi_processor_cpu_init_pdc(&data->acpi_data, cpu, &arg_list);
 result = acpi_processor_register_performance(&data->acpi_data, cpu);
- data->acpi_data.pdc = NULL;
 if (result)
 goto err_free;
diff --git a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
index 270f2188d68..cc73a7ae34b 100644
--- a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
@@ -52,6 +52,7 @@ enum {
 static int has_N44_O17_errata[NR_CPUS];
+static int has_N60_errata[NR_CPUS];
 static unsigned int stock_freq;
 static struct cpufreq_driver p4clockmod_driver;
 static unsigned int cpufreq_p4_get(unsigned int cpu);
@@ -226,6 +227,12 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
 case 0x0f12:
 has_N44_O17_errata[policy->cpu] = 1;
 dprintk("has errata -- disabling low frequencies\n");
+ break;
+
+ case 0x0f29:
+ has_N60_errata[policy->cpu] = 1;
+ dprintk("has errata -- disabling frequencies lower than 2ghz\n");
+ break;
 }
 /* get max frequency */
@@ -237,6 +244,8 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
 for (i=1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) {
 if ((i<2) && (has_N44_O17_errata[policy->cpu]))
 p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+ else if (has_N60_errata[policy->cpu] && p4clockmod_table[i].frequency < 2000000)
+ p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
 else
 p4clockmod_table[i].frequency = (stock_freq * i)/8;
 }
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index 9a826cde4fd..c173c0fa117 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -362,22 +362,10 @@ static struct acpi_processor_performance p;
 */
 static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
 {
- union acpi_object arg0 = {ACPI_TYPE_BUFFER};
- u32 arg0_buf[3];
- struct acpi_object_list arg_list = {1, &arg0};
 unsigned long cur_freq;
 int result = 0, i;
 unsigned int cpu = policy->cpu;
- /* _PDC settings */
- arg0.buffer.length = 12;
- arg0.buffer.pointer = (u8 *) arg0_buf;
- arg0_buf[0] = ACPI_PDC_REVISION_ID;
- arg0_buf[1] = 1;
- arg0_buf[2] = ACPI_PDC_EST_CAPABILITY_SMP_MSR;
-
- p.pdc = &arg_list;
-
 /* register with ACPI core */
 if (acpi_processor_register_performance(&p, cpu)) {
 dprintk(KERN_INFO PFX "obtaining ACPI data failed\n");
diff --git a/arch/i386/kernel/cpu/cyrix.c b/arch/i386/kernel/cpu/cyrix.c
index 75015975d03..00f2e058797 100644
--- a/arch/i386/kernel/cpu/cyrix.c
+++ b/arch/i386/kernel/cpu/cyrix.c
@@ -345,7 +345,7 @@ static void __init init_cyrix(struct cpuinfo_x86 *c)
 /*
 * Handle National Semiconductor branded processors
 */
-static void __devinit init_nsc(struct cpuinfo_x86 *c)
+static void __init init_nsc(struct cpuinfo_x86 *c)
 {
 /* There may be GX1 processors in the wild that are branded
 * NSC and not Cyrix.
@@ -444,6 +444,14 @@ int __init cyrix_init_cpu(void)
 //early_arch_initcall(cyrix_init_cpu);
+static int __init cyrix_exit_cpu(void)
+{
+ cpu_devs[X86_VENDOR_CYRIX] = NULL;
+ return 0;
+}
+
+late_initcall(cyrix_exit_cpu);
+
 static struct cpu_dev nsc_cpu_dev __initdata = {
 .c_vendor = "NSC",
 .c_ident = { "Geode by NSC" },
@@ -458,3 +466,11 @@ int __init nsc_init_cpu(void)
 }
 //early_arch_initcall(nsc_init_cpu);
+
+static int __init nsc_exit_cpu(void)
+{
+ cpu_devs[X86_VENDOR_NSC] = NULL;
+ return 0;
+}
+
+late_initcall(nsc_exit_cpu);
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index fbfd374aa33..ffe58cee0c4 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -43,13 +43,23 @@ static struct _cache_table cache_table[] __cpuinitdata =
 { 0x2c, LVL_1_DATA, 32 }, /* 8-way set assoc, 64 byte line size */
 { 0x30, LVL_1_INST, 32 }, /* 8-way set assoc, 64 byte line size */
 { 0x39, LVL_2, 128 }, /* 4-way set assoc, sectored cache, 64 byte line size */
+ { 0x3a, LVL_2, 192 }, /* 6-way set assoc, sectored cache, 64 byte line size */
 { 0x3b, LVL_2, 128 }, /* 2-way set assoc, sectored cache, 64 byte line size */
 { 0x3c, LVL_2, 256 }, /* 4-way set assoc, sectored cache, 64 byte line size */
+ { 0x3d, LVL_2, 384 }, /* 6-way set assoc, sectored cache, 64 byte line size */
+ { 0x3e, LVL_2, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
 { 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */
 { 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */
 { 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */
 { 0x44, LVL_2, 1024 }, /* 4-way set assoc, 32 byte line size */
 { 0x45, LVL_2, 2048 }, /* 4-way set assoc, 32 byte line size */
+ { 0x46, LVL_3, 4096 }, /* 4-way set assoc, 64 byte line size */
+ { 0x47, LVL_3, 8192 }, /* 8-way set assoc, 64 byte line size */
+ { 0x49, LVL_3, 4096 }, /* 16-way set assoc, 64 byte line size */
+ { 0x4a, LVL_3, 6144 }, /* 12-way set assoc, 64 byte line size */
+ { 0x4b, LVL_3, 8192 }, /* 16-way set assoc, 64 byte line size */
+ { 0x4c, LVL_3, 12288 }, /* 12-way set assoc, 64 byte line size */
+ { 0x4d, LVL_3, 16384 }, /* 16-way set assoc, 64 byte line size */
 { 0x60, LVL_1_DATA, 16 }, /* 8-way set assoc, sectored cache, 64 byte line size */
 { 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */
 { 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */
@@ -57,6 +67,7 @@ static struct _cache_table cache_table[] __cpuinitdata =
 { 0x70, LVL_TRACE, 12 }, /* 8-way set assoc */
 { 0x71, LVL_TRACE, 16 }, /* 8-way set assoc */
 { 0x72, LVL_TRACE, 32 }, /* 8-way set assoc */
+ { 0x73, LVL_TRACE, 64 }, /* 8-way set assoc */
 { 0x78, LVL_2, 1024 }, /* 4-way set assoc, 64 byte line size */
 { 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */
 { 0x7a, LVL_2, 256 }, /* 8-way set assoc, sectored cache, 64 byte line size */
@@ -141,6 +152,7 @@ static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_le
 return 0;
 }
+/* will only be called once; __init is safe here */
 static int __init find_num_cache_leaves(void)
 {
 unsigned int eax, ebx, ecx, edx;
diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c
index 1e9db198c44..3b4618bed70 100644
--- a/arch/i386/kernel/cpu/mtrr/main.c
+++ b/arch/i386/kernel/cpu/mtrr/main.c
@@ -44,12 +44,10 @@
 #include <asm/msr.h>
 #include "mtrr.h"
-#define MTRR_VERSION "2.0 (20020519)"
-
 u32 num_var_ranges = 0;
 unsigned int *usage_table;
-static DECLARE_MUTEX(main_lock);
+static DECLARE_MUTEX(mtrr_sem);
 u32 size_or_mask, size_and_mask;
@@ -335,7 +333,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
 /* No CPU hotplug when we change MTRR entries */
 lock_cpu_hotplug();
 /* Search for existing MTRR */
- down(&main_lock);
+ down(&mtrr_sem);
 for (i = 0; i < num_var_ranges; ++i) {
 mtrr_if->get(i, &lbase, &lsize, &ltype);
 if (base >= lbase + lsize)
@@ -373,7 +371,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
 printk(KERN_INFO "mtrr: no more MTRRs available\n");
 error = i;
 out:
- up(&main_lock);
+ up(&mtrr_sem);
 unlock_cpu_hotplug();
 return error;
 }
@@ -466,7 +464,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
 max = num_var_ranges;
 /* No CPU hotplug when we change MTRR entries */
 lock_cpu_hotplug();
- down(&main_lock);
+ down(&mtrr_sem);
 if (reg < 0) {
 /* Search for existing MTRR */
 for (i = 0; i < max; ++i) {
@@ -505,7 +503,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
 set_mtrr(reg, 0, 0, 0);
 error = reg;
 out:
- up(&main_lock);
+ up(&mtrr_sem);
 unlock_cpu_hotplug();
 return error;
 }
@@ -671,7 +669,6 @@ void __init mtrr_bp_init(void)
 break;
 }
 }
- printk(KERN_INFO "mtrr: v%s\n",MTRR_VERSION);
 if (mtrr_if) {
 set_num_var_ranges();
@@ -688,7 +685,7 @@ void mtrr_ap_init(void)
 if (!mtrr_if || !use_intel())
 return;
 /*
- * Ideally we should hold main_lock here to avoid mtrr entries changed,
+ * Ideally we should hold mtrr_sem here to avoid mtrr entries changed,
 * but this routine will be called in cpu boot time, holding the lock
 * breaks it. This routine is called in two cases: 1.very earily time
 * of software resume, when there absolutely isn't mtrr entry changes;
diff --git a/arch/i386/kernel/cpu/nexgen.c b/arch/i386/kernel/cpu/nexgen.c
index 30898a260a5..ad87fa58058 100644
--- a/arch/i386/kernel/cpu/nexgen.c
+++ b/arch/i386/kernel/cpu/nexgen.c
@@ -61,3 +61,11 @@ int __init nexgen_init_cpu(void)
 }
 //early_arch_initcall(nexgen_init_cpu);
+
+static int __init nexgen_exit_cpu(void)
+{
+ cpu_devs[X86_VENDOR_NEXGEN] = NULL;
+ return 0;
+}
+
+late_initcall(nexgen_exit_cpu);
diff --git a/arch/i386/kernel/cpu/rise.c b/arch/i386/kernel/cpu/rise.c
index 8602425628c..d08d5a2811c 100644
--- a/arch/i386/kernel/cpu/rise.c
+++ b/arch/i386/kernel/cpu/rise.c
@@ -51,3 +51,11 @@ int __init rise_init_cpu(void)
 }
 //early_arch_initcall(rise_init_cpu);
+
+static int __init rise_exit_cpu(void)
+{
+ cpu_devs[X86_VENDOR_RISE] = NULL;
+ return 0;
+}
+
+late_initcall(rise_exit_cpu);
diff --git a/arch/i386/kernel/cpu/transmeta.c b/arch/i386/kernel/cpu/transmeta.c
index fc426380366..bdbeb77f4e2 100644
--- a/arch/i386/kernel/cpu/transmeta.c
+++ b/arch/i386/kernel/cpu/transmeta.c
@@ -84,7 +84,7 @@ static void __init init_transmeta(struct cpuinfo_x86 *c)
 #endif
 }
-static void transmeta_identify(struct cpuinfo_x86 * c)
+static void __init transmeta_identify(struct cpuinfo_x86 * c)
 {
 u32 xlvl;
 generic_identify(c);
@@ -111,3 +111,11 @@ int __init transmeta_init_cpu(void)
 }
 //early_arch_initcall(transmeta_init_cpu);
+
+static int __init transmeta_exit_cpu(void)
+{
+ cpu_devs[X86_VENDOR_TRANSMETA] = NULL;
+ return 0;
+}
+
+late_initcall(transmeta_exit_cpu);
diff --git a/arch/i386/kernel/cpu/umc.c b/arch/i386/kernel/cpu/umc.c
index 264fcad559d..2cd988f6dc5 100644
--- a/arch/i386/kernel/cpu/umc.c
+++ b/arch/i386/kernel/cpu/umc.c
@@ -31,3 +31,11 @@ int __init umc_init_cpu(void)
 }
 //early_arch_initcall(umc_init_cpu);
+
+static int __init umc_exit_cpu(void)
+{
+ cpu_devs[X86_VENDOR_UMC] = NULL;
+ return 0;
+}
+
+late_initcall(umc_exit_cpu);
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index 91a64016956..0102f3d50e5 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -1080,7 +1080,7 @@ void __init mp_config_acpi_legacy_irqs (void)
 #define MAX_GSI_NUM 4096
-int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
+int mp_register_gsi (u32 gsi, int triggering, int polarity)
 {
 int ioapic = -1;
 int ioapic_pin = 0;
@@ -1129,7 +1129,7 @@ int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
 mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
- if (edge_level) {
+ if (triggering == ACPI_LEVEL_SENSITIVE) {
 /*
 * For PCI devices assign IRQs in order, avoiding gaps
 * due to unused I/O APIC pins.
@@ -1151,8 +1151,8 @@ int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
 }
 io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
- edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
- active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
+ triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
+ polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
 return gsi;
 }
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index d661703ac1c..63f39a7e2c9 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -138,7 +138,7 @@ static int __init check_nmi_watchdog(void)
 if (nmi_watchdog == NMI_LOCAL_APIC)
 smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
- for (cpu = 0; cpu < NR_CPUS; cpu++)
+ for_each_cpu(cpu)
 prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
 local_irq_enable();
 mdelay((10*1000)/nmi_hz); // wait 10 ticks
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 2185377fdde..0480454ebff 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -297,8 +297,10 @@ void show_regs(struct pt_regs * regs)
 if (user_mode(regs))
 printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
- printk(" EFLAGS: %08lx %s (%s)\n",
- regs->eflags, print_tainted(), system_utsname.release);
+ printk(" EFLAGS: %08lx %s (%s %.*s)\n",
+ regs->eflags, print_tainted(), system_utsname.release,
+ (int)strcspn(system_utsname.version, " "),
+ system_utsname.version);
 printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
 regs->eax,regs->ebx,regs->ecx,regs->edx);
 printk("ESI: %08lx EDI: %08lx EBP: %08lx",
diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c
index 47675bbbb31..7c86e3c5f1c 100644
--- a/arch/i386/kernel/timers/timer_tsc.c
+++ b/arch/i386/kernel/timers/timer_tsc.c
@@ -45,6 +45,15 @@ static unsigned long last_tsc_high; /* msb 32 bits of Time Stamp Counter */
 static unsigned long long monotonic_base;
 static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
+/* Avoid compensating for lost ticks before TSCs are synched */
+static int detect_lost_ticks;
+static int __init start_lost_tick_compensation(void)
+{
+ detect_lost_ticks = 1;
+ return 0;
+}
+late_initcall(start_lost_tick_compensation);
+
 /* convert from cycles(64bits) => nanoseconds (64bits)
 * basic equation:
 * ns = cycles / (freq / ns_per_sec)
@@ -196,7 +205,8 @@ static void mark_offset_tsc_hpet(void)
 /* lost tick compensation */
 offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
- if (unlikely(((offset - hpet_last) > hpet_tick) && (hpet_last != 0))) {
+ if (unlikely(((offset - hpet_last) > hpet_tick) && (hpet_last != 0))
+ && detect_lost_ticks) {
 int lost_ticks = (offset - hpet_last) / hpet_tick;
 jiffies_64 += lost_ticks;
 }
@@ -421,7 +431,7 @@ static void mark_offset_tsc(void)
 delta += delay_at_last_interrupt;
 lost = delta/(1000000/HZ);
 delay = delta%(1000000/HZ);
- if (lost >= 2) {
+ if (lost >= 2 && detect_lost_ticks) {
 jiffies_64 += lost-1;
 /* sanity check to ensure we're not always losing ticks */
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 0aaebf3e1cf..b814dbdcc91 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -166,7 +166,8 @@ static void show_trace_log_lvl(struct task_struct *task,
 stack = (unsigned long*)context->previous_esp;
 if (!stack)
 break;
- printk(KERN_EMERG " =======================\n");
+ printk(log_lvl);
+ printk(" =======================\n");
 }
 }
@@ -239,9 +240,11 @@ void show_registers(struct pt_regs *regs)
 }
 print_modules();
 printk(KERN_EMERG "CPU: %d\nEIP: %04x:[<%08lx>] %s VLI\n"
- "EFLAGS: %08lx (%s) \n",
+ "EFLAGS: %08lx (%s %.*s) \n",
 smp_processor_id(), 0xffff & regs->xcs, regs->eip,
- print_tainted(), regs->eflags, system_utsname.release);
+ print_tainted(), regs->eflags, system_utsname.release,
+ (int)strcspn(system_utsname.version, " "),
+ system_utsname.version);
 print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
 printk(KERN_EMERG "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
 regs->eax, regs->ebx, regs->ecx, regs->edx);