Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/Makefile            1
-rw-r--r--  arch/ia64/kernel/acpi.c            200
-rw-r--r--  arch/ia64/kernel/asm-offsets.c       1
-rw-r--r--  arch/ia64/kernel/crash.c            32
-rw-r--r--  arch/ia64/kernel/crash_dump.c        3
-rw-r--r--  arch/ia64/kernel/efi.c              43
-rw-r--r--  arch/ia64/kernel/entry.S             2
-rw-r--r--  arch/ia64/kernel/err_inject.c      293
-rw-r--r--  arch/ia64/kernel/fsys.S            105
-rw-r--r--  arch/ia64/kernel/iosapic.c          13
-rw-r--r--  arch/ia64/kernel/irq_ia64.c          2
-rw-r--r--  arch/ia64/kernel/machine_kexec.c    15
-rw-r--r--  arch/ia64/kernel/mca.c               8
-rw-r--r--  arch/ia64/kernel/mca_drv.c          65
-rw-r--r--  arch/ia64/kernel/msi_ia64.c         25
-rw-r--r--  arch/ia64/kernel/perfmon.c          80
-rw-r--r--  arch/ia64/kernel/process.c          16
-rw-r--r--  arch/ia64/kernel/ptrace.c           15
-rw-r--r--  arch/ia64/kernel/sal.c               3
-rw-r--r--  arch/ia64/kernel/salinfo.c           4
-rw-r--r--  arch/ia64/kernel/setup.c            91
-rw-r--r--  arch/ia64/kernel/smp.c               4
-rw-r--r--  arch/ia64/kernel/smpboot.c          17
-rw-r--r--  arch/ia64/kernel/traps.c            30
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S       3
25 files changed, 801 insertions, 270 deletions
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 098ee605bf5..33e5a598672 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
obj-$(CONFIG_AUDIT) += audit.o
obj-$(CONFIG_PCI_MSI) += msi_ia64.o
mca_recovery-y += mca_drv.o mca_drv_asm.o
+obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o
obj-$(CONFIG_IA64_ESI) += esi.o
ifneq ($(CONFIG_IA64_ESI),)
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 29f05d4b68c..3549c94467b 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -55,7 +55,7 @@
#define BAD_MADT_ENTRY(entry, end) ( \
(!entry) || (unsigned long)entry + sizeof(*entry) > end || \
- ((acpi_table_entry_header *)entry)->length < sizeof(*entry))
+ ((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
#define PREFIX "ACPI: "
@@ -67,16 +67,11 @@ EXPORT_SYMBOL(pm_power_off);
unsigned int acpi_cpei_override;
unsigned int acpi_cpei_phys_cpuid;
-#define MAX_SAPICS 256
-u16 ia64_acpiid_to_sapicid[MAX_SAPICS] = {[0 ... MAX_SAPICS - 1] = -1 };
-
-EXPORT_SYMBOL(ia64_acpiid_to_sapicid);
-
const char *acpi_get_sysname(void)
{
#ifdef CONFIG_IA64_GENERIC
unsigned long rsdp_phys;
- struct acpi20_table_rsdp *rsdp;
+ struct acpi_table_rsdp *rsdp;
struct acpi_table_xsdt *xsdt;
struct acpi_table_header *hdr;
@@ -87,16 +82,16 @@ const char *acpi_get_sysname(void)
return "dig";
}
- rsdp = (struct acpi20_table_rsdp *)__va(rsdp_phys);
- if (strncmp(rsdp->signature, RSDP_SIG, sizeof(RSDP_SIG) - 1)) {
+ rsdp = (struct acpi_table_rsdp *)__va(rsdp_phys);
+ if (strncmp(rsdp->signature, ACPI_SIG_RSDP, sizeof(ACPI_SIG_RSDP) - 1)) {
printk(KERN_ERR
"ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n");
return "dig";
}
- xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_address);
+ xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_physical_address);
hdr = &xsdt->header;
- if (strncmp(hdr->signature, XSDT_SIG, sizeof(XSDT_SIG) - 1)) {
+ if (strncmp(hdr->signature, ACPI_SIG_XSDT, sizeof(ACPI_SIG_XSDT) - 1)) {
printk(KERN_ERR
"ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n");
return "dig";
@@ -169,12 +164,12 @@ struct acpi_table_madt *acpi_madt __initdata;
static u8 has_8259;
static int __init
-acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
+acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
const unsigned long end)
{
- struct acpi_table_lapic_addr_ovr *lapic;
+ struct acpi_madt_local_apic_override *lapic;
- lapic = (struct acpi_table_lapic_addr_ovr *)header;
+ lapic = (struct acpi_madt_local_apic_override *)header;
if (BAD_MADT_ENTRY(lapic, end))
return -EINVAL;
@@ -187,22 +182,19 @@ acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
}
static int __init
-acpi_parse_lsapic(acpi_table_entry_header * header, const unsigned long end)
+acpi_parse_lsapic(struct acpi_subtable_header * header, const unsigned long end)
{
- struct acpi_table_lsapic *lsapic;
+ struct acpi_madt_local_sapic *lsapic;
- lsapic = (struct acpi_table_lsapic *)header;
+ lsapic = (struct acpi_madt_local_sapic *)header;
- if (BAD_MADT_ENTRY(lsapic, end))
- return -EINVAL;
+ /*Skip BAD_MADT_ENTRY check, as lsapic size could vary */
- if (lsapic->flags.enabled) {
+ if (lsapic->lapic_flags & ACPI_MADT_ENABLED) {
#ifdef CONFIG_SMP
smp_boot_data.cpu_phys_id[available_cpus] =
(lsapic->id << 8) | lsapic->eid;
#endif
- ia64_acpiid_to_sapicid[lsapic->acpi_id] =
- (lsapic->id << 8) | lsapic->eid;
++available_cpus;
}
@@ -211,11 +203,11 @@ acpi_parse_lsapic(acpi_table_entry_header * header, const unsigned long end)
}
static int __init
-acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
+acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
{
- struct acpi_table_lapic_nmi *lacpi_nmi;
+ struct acpi_madt_local_apic_nmi *lacpi_nmi;
- lacpi_nmi = (struct acpi_table_lapic_nmi *)header;
+ lacpi_nmi = (struct acpi_madt_local_apic_nmi *)header;
if (BAD_MADT_ENTRY(lacpi_nmi, end))
return -EINVAL;
@@ -225,11 +217,11 @@ acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
}
static int __init
-acpi_parse_iosapic(acpi_table_entry_header * header, const unsigned long end)
+acpi_parse_iosapic(struct acpi_subtable_header * header, const unsigned long end)
{
- struct acpi_table_iosapic *iosapic;
+ struct acpi_madt_io_sapic *iosapic;
- iosapic = (struct acpi_table_iosapic *)header;
+ iosapic = (struct acpi_madt_io_sapic *)header;
if (BAD_MADT_ENTRY(iosapic, end))
return -EINVAL;
@@ -240,13 +232,13 @@ acpi_parse_iosapic(acpi_table_entry_header * header, const unsigned long end)
static unsigned int __initdata acpi_madt_rev;
static int __init
-acpi_parse_plat_int_src(acpi_table_entry_header * header,
+acpi_parse_plat_int_src(struct acpi_subtable_header * header,
const unsigned long end)
{
- struct acpi_table_plat_int_src *plintsrc;
+ struct acpi_madt_interrupt_source *plintsrc;
int vector;
- plintsrc = (struct acpi_table_plat_int_src *)header;
+ plintsrc = (struct acpi_madt_interrupt_source *)header;
if (BAD_MADT_ENTRY(plintsrc, end))
return -EINVAL;
@@ -257,19 +249,19 @@ acpi_parse_plat_int_src(acpi_table_entry_header * header,
*/
vector = iosapic_register_platform_intr(plintsrc->type,
plintsrc->global_irq,
- plintsrc->iosapic_vector,
+ plintsrc->io_sapic_vector,
plintsrc->eid,
plintsrc->id,
- (plintsrc->flags.polarity ==
- 1) ? IOSAPIC_POL_HIGH :
- IOSAPIC_POL_LOW,
- (plintsrc->flags.trigger ==
- 1) ? IOSAPIC_EDGE :
- IOSAPIC_LEVEL);
+ ((plintsrc->inti_flags & ACPI_MADT_POLARITY_MASK) ==
+ ACPI_MADT_POLARITY_ACTIVE_HIGH) ?
+ IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
+ ((plintsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
+ ACPI_MADT_TRIGGER_EDGE) ?
+ IOSAPIC_EDGE : IOSAPIC_LEVEL);
platform_intr_list[plintsrc->type] = vector;
if (acpi_madt_rev > 1) {
- acpi_cpei_override = plintsrc->plint_flags.cpei_override_flag;
+ acpi_cpei_override = plintsrc->flags & ACPI_MADT_CPEI_OVERRIDE;
}
/*
@@ -324,30 +316,32 @@ unsigned int get_cpei_target_cpu(void)
}
static int __init
-acpi_parse_int_src_ovr(acpi_table_entry_header * header,
+acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
const unsigned long end)
{
- struct acpi_table_int_src_ovr *p;
+ struct acpi_madt_interrupt_override *p;
- p = (struct acpi_table_int_src_ovr *)header;
+ p = (struct acpi_madt_interrupt_override *)header;
if (BAD_MADT_ENTRY(p, end))
return -EINVAL;
- iosapic_override_isa_irq(p->bus_irq, p->global_irq,
- (p->flags.polarity ==
- 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
- (p->flags.trigger ==
- 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
+ iosapic_override_isa_irq(p->source_irq, p->global_irq,
+ ((p->inti_flags & ACPI_MADT_POLARITY_MASK) ==
+ ACPI_MADT_POLARITY_ACTIVE_HIGH) ?
+ IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
+ ((p->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
+ ACPI_MADT_TRIGGER_EDGE) ?
+ IOSAPIC_EDGE : IOSAPIC_LEVEL);
return 0;
}
static int __init
-acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end)
+acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
{
- struct acpi_table_nmi_src *nmi_src;
+ struct acpi_madt_nmi_source *nmi_src;
- nmi_src = (struct acpi_table_nmi_src *)header;
+ nmi_src = (struct acpi_madt_nmi_source *)header;
if (BAD_MADT_ENTRY(nmi_src, end))
return -EINVAL;
@@ -371,12 +365,12 @@ static void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
}
}
-static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
+static int __init acpi_parse_madt(struct acpi_table_header *table)
{
- if (!phys_addr || !size)
+ if (!table)
return -EINVAL;
- acpi_madt = (struct acpi_table_madt *)__va(phys_addr);
+ acpi_madt = (struct acpi_table_madt *)table;
acpi_madt_rev = acpi_madt->header.revision;
@@ -384,14 +378,14 @@ static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
#ifdef CONFIG_ITANIUM
has_8259 = 1; /* Firmware on old Itanium systems is broken */
#else
- has_8259 = acpi_madt->flags.pcat_compat;
+ has_8259 = acpi_madt->flags & ACPI_MADT_PCAT_COMPAT;
#endif
iosapic_system_init(has_8259);
/* Get base address of IPI Message Block */
- if (acpi_madt->lapic_address)
- ipi_base_addr = ioremap(acpi_madt->lapic_address, 0);
+ if (acpi_madt->address)
+ ipi_base_addr = ioremap(acpi_madt->address, 0);
printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr);
@@ -413,23 +407,24 @@ static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
#define pxm_bit_test(bit) (test_bit(bit,(void *)pxm_flag))
static struct acpi_table_slit __initdata *slit_table;
-static int get_processor_proximity_domain(struct acpi_table_processor_affinity *pa)
+static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
{
int pxm;
- pxm = pa->proximity_domain;
+ pxm = pa->proximity_domain_lo;
if (ia64_platform_is("sn2"))
- pxm += pa->reserved[0] << 8;
+ pxm += pa->proximity_domain_hi[0] << 8;
return pxm;
}
-static int get_memory_proximity_domain(struct acpi_table_memory_affinity *ma)
+static int get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
{
int pxm;
pxm = ma->proximity_domain;
- if (ia64_platform_is("sn2"))
- pxm += ma->reserved1[0] << 8;
+ if (!ia64_platform_is("sn2"))
+ pxm &= 0xff;
+
return pxm;
}
@@ -442,7 +437,7 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
u32 len;
len = sizeof(struct acpi_table_header) + 8
- + slit->localities * slit->localities;
+ + slit->locality_count * slit->locality_count;
if (slit->header.length != len) {
printk(KERN_ERR
"ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
@@ -454,11 +449,11 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
}
void __init
-acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
+acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
{
int pxm;
- if (!pa->flags.enabled)
+ if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
return;
pxm = get_processor_proximity_domain(pa);
@@ -467,14 +462,14 @@ acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
pxm_bit_set(pxm);
node_cpuid[srat_num_cpus].phys_id =
- (pa->apic_id << 8) | (pa->lsapic_eid);
+ (pa->apic_id << 8) | (pa->local_sapic_eid);
/* nid should be overridden as logical node id later */
node_cpuid[srat_num_cpus].nid = pxm;
srat_num_cpus++;
}
void __init
-acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
+acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
unsigned long paddr, size;
int pxm;
@@ -483,13 +478,11 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
pxm = get_memory_proximity_domain(ma);
/* fill node memory chunk structure */
- paddr = ma->base_addr_hi;
- paddr = (paddr << 32) | ma->base_addr_lo;
- size = ma->length_hi;
- size = (size << 32) | ma->length_lo;
+ paddr = ma->base_address;
+ size = ma->length;
/* Ignore disabled entries */
- if (!ma->flags.enabled)
+ if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
return;
/* record this node in proximity bitmap */
@@ -560,16 +553,16 @@ void __init acpi_numa_arch_fixup(void)
if (!slit_table)
return;
memset(numa_slit, -1, sizeof(numa_slit));
- for (i = 0; i < slit_table->localities; i++) {
+ for (i = 0; i < slit_table->locality_count; i++) {
if (!pxm_bit_test(i))
continue;
node_from = pxm_to_node(i);
- for (j = 0; j < slit_table->localities; j++) {
+ for (j = 0; j < slit_table->locality_count; j++) {
if (!pxm_bit_test(j))
continue;
node_to = pxm_to_node(j);
node_distance(node_from, node_to) =
- slit_table->entry[i * slit_table->localities + j];
+ slit_table->entry[i * slit_table->locality_count + j];
}
}
@@ -617,21 +610,21 @@ void acpi_unregister_gsi(u32 gsi)
EXPORT_SYMBOL(acpi_unregister_gsi);
-static int __init acpi_parse_fadt(unsigned long phys_addr, unsigned long size)
+static int __init acpi_parse_fadt(struct acpi_table_header *table)
{
struct acpi_table_header *fadt_header;
- struct fadt_descriptor *fadt;
+ struct acpi_table_fadt *fadt;
- if (!phys_addr || !size)
+ if (!table)
return -EINVAL;
- fadt_header = (struct acpi_table_header *)__va(phys_addr);
+ fadt_header = (struct acpi_table_header *)table;
if (fadt_header->revision != 3)
return -ENODEV; /* Only deal with ACPI 2.0 FADT */
- fadt = (struct fadt_descriptor *)fadt_header;
+ fadt = (struct acpi_table_fadt *)fadt_header;
- acpi_register_gsi(fadt->sci_int, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
+ acpi_register_gsi(fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
return 0;
}
@@ -658,7 +651,7 @@ int __init acpi_boot_init(void)
* information -- the successor to MPS tables.
*/
- if (acpi_table_parse(ACPI_APIC, acpi_parse_madt) < 1) {
+ if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
printk(KERN_ERR PREFIX "Can't find MADT\n");
goto skip_madt;
}
@@ -666,40 +659,40 @@ int __init acpi_boot_init(void)
/* Local APIC */
if (acpi_table_parse_madt
- (ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr, 0) < 0)
+ (ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, acpi_parse_lapic_addr_ovr, 0) < 0)
printk(KERN_ERR PREFIX
"Error parsing LAPIC address override entry\n");
- if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_parse_lsapic, NR_CPUS)
+ if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC, acpi_parse_lsapic, NR_CPUS)
< 1)
printk(KERN_ERR PREFIX
"Error parsing MADT - no LAPIC entries\n");
- if (acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0)
+ if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0)
< 0)
printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
/* I/O APIC */
if (acpi_table_parse_madt
- (ACPI_MADT_IOSAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1)
+ (ACPI_MADT_TYPE_IO_SAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1)
printk(KERN_ERR PREFIX
"Error parsing MADT - no IOSAPIC entries\n");
/* System-Level Interrupt Routing */
if (acpi_table_parse_madt
- (ACPI_MADT_PLAT_INT_SRC, acpi_parse_plat_int_src,
+ (ACPI_MADT_TYPE_INTERRUPT_SOURCE, acpi_parse_plat_int_src,
ACPI_MAX_PLATFORM_INTERRUPTS) < 0)
printk(KERN_ERR PREFIX
"Error parsing platform interrupt source entry\n");
if (acpi_table_parse_madt
- (ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, 0) < 0)
+ (ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr, 0) < 0)
printk(KERN_ERR PREFIX
"Error parsing interrupt source overrides entry\n");
- if (acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src, 0) < 0)
+ if (acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src, 0) < 0)
printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
skip_madt:
@@ -709,7 +702,7 @@ int __init acpi_boot_init(void)
* gets interrupts such as power and sleep buttons. If it's not
* on a Legacy interrupt, it needs to be setup.
*/
- if (acpi_table_parse(ACPI_FADT, acpi_parse_fadt) < 1)
+ if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt))
printk(KERN_ERR PREFIX "Can't find FADT\n");
#ifdef CONFIG_SMP
@@ -842,7 +835,7 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
- struct acpi_table_lsapic *lsapic;
+ struct acpi_madt_local_sapic *lsapic;
cpumask_t tmp_map;
long physid;
int cpu;
@@ -854,16 +847,16 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
return -EINVAL;
obj = buffer.pointer;
- if (obj->type != ACPI_TYPE_BUFFER ||
- obj->buffer.length < sizeof(*lsapic)) {
+ if (obj->type != ACPI_TYPE_BUFFER)
+ {
kfree(buffer.pointer);
return -EINVAL;
}
- lsapic = (struct acpi_table_lsapic *)obj->buffer.pointer;
+ lsapic = (struct acpi_madt_local_sapic *)obj->buffer.pointer;
- if ((lsapic->header.type != ACPI_MADT_LSAPIC) ||
- (!lsapic->flags.enabled)) {
+ if ((lsapic->header.type != ACPI_MADT_TYPE_LOCAL_SAPIC) ||
+ (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))) {
kfree(buffer.pointer);
return -EINVAL;
}
@@ -883,7 +876,6 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
cpu_set(cpu, cpu_present_map);
ia64_cpu_to_sapicid[cpu] = physid;
- ia64_acpiid_to_sapicid[lsapic->acpi_id] = ia64_cpu_to_sapicid[cpu];
*pcpu = cpu;
return (0);
@@ -893,14 +885,6 @@ EXPORT_SYMBOL(acpi_map_lsapic);
int acpi_unmap_lsapic(int cpu)
{
- int i;
-
- for (i = 0; i < MAX_SAPICS; i++) {
- if (ia64_acpiid_to_sapicid[i] == ia64_cpu_to_sapicid[cpu]) {
- ia64_acpiid_to_sapicid[i] = -1;
- break;
- }
- }
ia64_cpu_to_sapicid[cpu] = -1;
cpu_clear(cpu, cpu_present_map);
@@ -920,7 +904,7 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
- struct acpi_table_iosapic *iosapic;
+ struct acpi_madt_io_sapic *iosapic;
unsigned int gsi_base;
int pxm, node;
@@ -938,9 +922,9 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
return AE_OK;
}
- iosapic = (struct acpi_table_iosapic *)obj->buffer.pointer;
+ iosapic = (struct acpi_madt_io_sapic *)obj->buffer.pointer;
- if (iosapic->header.type != ACPI_MADT_IOSAPIC) {
+ if (iosapic->header.type != ACPI_MADT_TYPE_IO_SAPIC) {
kfree(buffer.pointer);
return AE_OK;
}
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index 75a2a2c1225..2236fabbb3c 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -35,6 +35,7 @@ void foo(void)
BLANK();
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
BLANK();
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index bc2f64d7224..80a94e70782 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -21,9 +21,9 @@
#include <asm/mca.h>
int kdump_status[NR_CPUS];
-atomic_t kdump_cpu_freezed;
+static atomic_t kdump_cpu_frozen;
atomic_t kdump_in_progress;
-int kdump_on_init = 1;
+static int kdump_on_init = 1;
static inline Elf64_Word
*append_elf_note(Elf64_Word *buf, char *name, unsigned type, void *data,
@@ -52,7 +52,7 @@ extern void ia64_dump_cpu_regs(void *);
static DEFINE_PER_CPU(struct elf_prstatus, elf_prstatus);
void
-crash_save_this_cpu()
+crash_save_this_cpu(void)
{
void *buf;
unsigned long cfm, sof, sol;
@@ -79,18 +79,20 @@ crash_save_this_cpu()
final_note(buf);
}
+#ifdef CONFIG_SMP
static int
kdump_wait_cpu_freeze(void)
{
int cpu_num = num_online_cpus() - 1;
int timeout = 1000;
while(timeout-- > 0) {
- if (atomic_read(&kdump_cpu_freezed) == cpu_num)
+ if (atomic_read(&kdump_cpu_frozen) == cpu_num)
return 0;
udelay(1000);
}
return 1;
}
+#endif
void
machine_crash_shutdown(struct pt_regs *pt)
@@ -106,8 +108,8 @@ machine_crash_shutdown(struct pt_regs *pt)
kexec_disable_iosapic();
#ifdef CONFIG_SMP
kdump_smp_send_stop();
+ /* not all cpus responded to the IPI, send INIT to freeze them */
if (kdump_wait_cpu_freeze() && kdump_on_init) {
- //not all cpu response to IPI, send INIT to freeze them
kdump_smp_send_init();
}
#endif
@@ -116,6 +118,11 @@ machine_crash_shutdown(struct pt_regs *pt)
static void
machine_kdump_on_init(void)
{
+ if (!ia64_kimage) {
+ printk(KERN_NOTICE "machine_kdump_on_init(): "
+ "kdump not configured\n");
+ return;
+ }
local_irq_disable();
kexec_disable_iosapic();
machine_kexec(ia64_kimage);
@@ -129,14 +136,15 @@ kdump_cpu_freeze(struct unw_frame_info *info, void *arg)
cpuid = smp_processor_id();
crash_save_this_cpu();
current->thread.ksp = (__u64)info->sw - 16;
- atomic_inc(&kdump_cpu_freezed);
+ atomic_inc(&kdump_cpu_frozen);
kdump_status[cpuid] = 1;
mb();
- if (cpuid == 0) {
- for (;;)
- cpu_relax();
- } else
+#ifdef CONFIG_HOTPLUG_CPU
+ if (cpuid != 0)
ia64_jump_to_sal(&sal_boot_rendez_state[cpuid]);
+#endif
+ for (;;)
+ cpu_relax();
}
static int
@@ -156,7 +164,7 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
nd = (struct ia64_mca_notify_die *)args->err;
/* Reason code 1 means machine check rendezous*/
- if ((val == DIE_INIT_MONARCH_ENTER || DIE_INIT_SLAVE_ENTER) &&
+ if ((val == DIE_INIT_MONARCH_ENTER || val == DIE_INIT_SLAVE_ENTER) &&
nd->sos->rv_rc == 1)
return NOTIFY_DONE;
@@ -214,7 +222,7 @@ machine_crash_setup(void)
if((ret = register_die_notifier(&kdump_init_notifier_nb)) != 0)
return ret;
#ifdef CONFIG_SYSCTL
- register_sysctl_table(sys_table, 0);
+ register_sysctl_table(sys_table);
#endif
return 0;
}
diff --git a/arch/ia64/kernel/crash_dump.c b/arch/ia64/kernel/crash_dump.c
index 83b8c91c140..da60e90eeeb 100644
--- a/arch/ia64/kernel/crash_dump.c
+++ b/arch/ia64/kernel/crash_dump.c
@@ -9,7 +9,8 @@
#include <linux/errno.h>
#include <linux/types.h>
-#include <linux/uaccess.h>
+#include <asm/page.h>
+#include <asm/uaccess.h>
/**
* copy_oldmem_page - copy one page from "oldmem"
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 0b25a7d4e1e..f45f91d38ca 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -21,6 +21,7 @@
* Skip non-WB memory and ignore empty memory ranges.
*/
#include <linux/module.h>
+#include <linux/bootmem.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -380,7 +381,7 @@ efi_get_pal_addr (void)
#endif
return __va(md->phys_addr);
}
- printk(KERN_WARNING "%s: no PAL-code memory-descriptor found",
+ printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
__FUNCTION__);
return NULL;
}
@@ -413,11 +414,10 @@ efi_init (void)
efi_char16_t *c16;
u64 efi_desc_size;
char *cp, vendor[100] = "unknown";
- extern char saved_command_line[];
int i;
/* it's too early to be able to use the standard kernel command line support... */
- for (cp = saved_command_line; *cp; ) {
+ for (cp = boot_command_line; *cp; ) {
if (memcmp(cp, "mem=", 4) == 0) {
mem_limit = memparse(cp + 4, &cp);
} else if (memcmp(cp, "max_addr=", 9) == 0) {
@@ -971,6 +971,11 @@ efi_memmap_init(unsigned long *s, unsigned long *e)
if (!is_memory_available(md))
continue;
+#ifdef CONFIG_CRASH_DUMP
+ /* saved_max_pfn should ignore max_addr= command line arg */
+ if (saved_max_pfn < (efi_md_end(md) >> PAGE_SHIFT))
+ saved_max_pfn = (efi_md_end(md) >> PAGE_SHIFT);
+#endif
/*
* Round ends inward to granule boundaries
* Give trimmings to uncached allocator
@@ -1137,7 +1142,7 @@ efi_initialize_iomem_resources(struct resource *code_resource,
/* find a block of memory aligned to 64M exclude reserved regions
rsvd_regions are sorted
*/
-unsigned long
+unsigned long __init
kdump_find_rsvd_region (unsigned long size,
struct rsvd_region *r, int n)
{
@@ -1178,3 +1183,33 @@ kdump_find_rsvd_region (unsigned long size,
return ~0UL;
}
#endif
+
+#ifdef CONFIG_PROC_VMCORE
+/* locate the size of the descriptor at a certain address */
+unsigned long
+vmcore_find_descriptor_size (unsigned long address)
+{
+ void *efi_map_start, *efi_map_end, *p;
+ efi_memory_desc_t *md;
+ u64 efi_desc_size;
+ unsigned long ret = 0;
+
+ efi_map_start = __va(ia64_boot_param->efi_memmap);
+ efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
+ efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+ for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+ md = p;
+ if (efi_wb(md) && md->type == EFI_LOADER_DATA
+ && md->phys_addr == address) {
+ ret = efi_md_size(md);
+ break;
+ }
+ }
+
+ if (ret == 0)
+ printk(KERN_WARNING "Cannot locate EFI vmcore descriptor\n");
+
+ return ret;
+}
+#endif
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index ac4b304bea3..55fd2d5471e 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1607,5 +1607,7 @@ sys_call_table:
data8 sys_sync_file_range // 1300
data8 sys_tee
data8 sys_vmsplice
+ data8 sys_ni_syscall // reserved for move_pages
+ data8 sys_getcpu
.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
new file mode 100644
index 00000000000..d3e9f33e8bd
--- /dev/null
+++ b/arch/ia64/kernel/err_inject.c
@@ -0,0 +1,293 @@
+/*
+ * err_inject.c -
+ * 1.) Inject errors to a processor.
+ * 2.) Query error injection capabilities.
+ * This driver, along with user space code, can act as an error
+ * injection tool.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Written by: Fenghua Yu <fenghua.yu@intel.com>, Intel Corporation
+ * Copyright (C) 2006, Intel Corp. All rights reserved.
+ *
+ */
+#include <linux/sysdev.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
+
+#define ERR_INJ_DEBUG
+
+#define ERR_DATA_BUFFER_SIZE 3 // Three 8-byte values
+
+#define define_one_ro(name) \
+static SYSDEV_ATTR(name, 0444, show_##name, NULL)
+
+#define define_one_rw(name) \
+static SYSDEV_ATTR(name, 0644, show_##name, store_##name)
+
+static u64 call_start[NR_CPUS];
+static u64 phys_addr[NR_CPUS];
+static u64 err_type_info[NR_CPUS];
+static u64 err_struct_info[NR_CPUS];
+static struct {
+ u64 data1;
+ u64 data2;
+ u64 data3;
+} __attribute__((__aligned__(16))) err_data_buffer[NR_CPUS];
+static s64 status[NR_CPUS];
+static u64 capabilities[NR_CPUS];
+static u64 resources[NR_CPUS];
+
+#define show(name) \
+static ssize_t \
+show_##name(struct sys_device *dev, char *buf) \
+{ \
+ u32 cpu=dev->id; \
+ return sprintf(buf, "%lx\n", name[cpu]); \
+}
+
+#define store(name) \
+static ssize_t \
+store_##name(struct sys_device *dev, const char *buf, size_t size) \
+{ \
+ unsigned int cpu=dev->id; \
+ name[cpu] = simple_strtoull(buf, NULL, 16); \
+ return size; \
+}
+
+show(call_start)
+
+/* It's the user's responsibility to call the PAL procedure on a specific
+ * processor. The cpu number in the driver is only used for storing data.
+ */
+static ssize_t
+store_call_start(struct sys_device *dev, const char *buf, size_t size)
+{
+ unsigned int cpu=dev->id;
+ unsigned long call_start = simple_strtoull(buf, NULL, 16);
+
+#ifdef ERR_INJ_DEBUG
+ printk(KERN_DEBUG "pal_mc_err_inject for cpu%d:\n", cpu);
+ printk(KERN_DEBUG "err_type_info=%lx,\n", err_type_info[cpu]);
+ printk(KERN_DEBUG "err_struct_info=%lx,\n", err_struct_info[cpu]);
+ printk(KERN_DEBUG "err_data_buffer=%lx, %lx, %lx.\n",
+ err_data_buffer[cpu].data1,
+ err_data_buffer[cpu].data2,
+ err_data_buffer[cpu].data3);
+#endif
+ switch (call_start) {
+ case 0: /* Do nothing. */
+ break;
+ case 1: /* Call pal_mc_error_inject in physical mode. */
+ status[cpu]=ia64_pal_mc_error_inject_phys(err_type_info[cpu],
+ err_struct_info[cpu],
+ ia64_tpa(&err_data_buffer[cpu]),
+ &capabilities[cpu],
+ &resources[cpu]);
+ break;
+ case 2: /* Call pal_mc_error_inject in virtual mode. */
+ status[cpu]=ia64_pal_mc_error_inject_virt(err_type_info[cpu],
+ err_struct_info[cpu],
+ ia64_tpa(&err_data_buffer[cpu]),
+ &capabilities[cpu],
+ &resources[cpu]);
+ break;
+ default:
+ status[cpu] = -EINVAL;
+ break;
+ }
+
+#ifdef ERR_INJ_DEBUG
+ printk(KERN_DEBUG "Returns: status=%d,\n", (int)status[cpu]);
+ printk(KERN_DEBUG "capapbilities=%lx,\n", capabilities[cpu]);
+ printk(KERN_DEBUG "resources=%lx\n", resources[cpu]);
+#endif
+ return size;
+}
+
+show(err_type_info)
+store(err_type_info)
+
+static ssize_t
+show_virtual_to_phys(struct sys_device *dev, char *buf)
+{
+ unsigned int cpu=dev->id;
+ return sprintf(buf, "%lx\n", phys_addr[cpu]);
+}
+
+static ssize_t
+store_virtual_to_phys(struct sys_device *dev, const char *buf, size_t size)
+{
+ unsigned int cpu=dev->id;
+ u64 virt_addr=simple_strtoull(buf, NULL, 16);
+ int ret;
+
+ ret = get_user_pages(current, current->mm, virt_addr,
+ 1, VM_READ, 0, NULL, NULL);
+ if (ret<=0) {
+#ifdef ERR_INJ_DEBUG
+ printk("Virtual address %lx is not existing.\n",virt_addr);
+#endif
+ return -EINVAL;
+ }
+
+ phys_addr[cpu] = ia64_tpa(virt_addr);
+ return size;
+}
+
+show(err_struct_info)
+store(err_struct_info)
+
+static ssize_t
+show_err_data_buffer(struct sys_device *dev, char *buf)
+{
+ unsigned int cpu=dev->id;
+
+ return sprintf(buf, "%lx, %lx, %lx\n",
+ err_data_buffer[cpu].data1,
+ err_data_buffer[cpu].data2,
+ err_data_buffer[cpu].data3);
+}
+
+static ssize_t
+store_err_data_buffer(struct sys_device *dev, const char *buf, size_t size)
+{
+ unsigned int cpu=dev->id;
+ int ret;
+
+#ifdef ERR_INJ_DEBUG
+ printk("write err_data_buffer=[%lx,%lx,%lx] on cpu%d\n",
+ err_data_buffer[cpu].data1,
+ err_data_buffer[cpu].data2,
+ err_data_buffer[cpu].data3,
+ cpu);
+#endif
+ ret=sscanf(buf, "%lx, %lx, %lx",
+ &err_data_buffer[cpu].data1,
+ &err_data_buffer[cpu].data2,
+ &err_data_buffer[cpu].data3);
+ if (ret!=ERR_DATA_BUFFER_SIZE)
+ return -EINVAL;
+
+ return size;
+}
+
+show(status)
+show(capabilities)
+show(resources)
+
+define_one_rw(call_start);
+define_one_rw(err_type_info);
+define_one_rw(err_struct_info);
+define_one_rw(err_data_buffer);
+define_one_rw(virtual_to_phys);
+define_one_ro(status);
+define_one_ro(capabilities);
+define_one_ro(resources);
+
+static struct attribute *default_attrs[] = {
+ &attr_call_start.attr,
+ &attr_virtual_to_phys.attr,
+ &attr_err_type_info.attr,
+ &attr_err_struct_info.attr,
+ &attr_err_data_buffer.attr,
+ &attr_status.attr,
+ &attr_capabilities.attr,
+ &attr_resources.attr,
+ NULL
+};
+
+static struct attribute_group err_inject_attr_group = {
+ .attrs = default_attrs,
+ .name = "err_inject"
+};
+/* Add/Remove err_inject interface for CPU device */
+static int __cpuinit err_inject_add_dev(struct sys_device * sys_dev)
+{
+ return sysfs_create_group(&sys_dev->kobj, &err_inject_attr_group);
+}
+
+static int __cpuinit err_inject_remove_dev(struct sys_device * sys_dev)
+{
+ sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
+ return 0;
+}
+static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+ struct sys_device *sys_dev;
+
+ sys_dev = get_cpu_sysdev(cpu);
+ switch (action) {
+ case CPU_ONLINE:
+ err_inject_add_dev(sys_dev);
+ break;
+ case CPU_DEAD:
+ err_inject_remove_dev(sys_dev);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata err_inject_cpu_notifier =
+{
+ .notifier_call = err_inject_cpu_callback,
+};
+
+static int __init
+err_inject_init(void)
+{
+ int i;
+
+#ifdef ERR_INJ_DEBUG
+ printk(KERN_INFO "Enter error injection driver.\n");
+#endif
+ for_each_online_cpu(i) {
+ err_inject_cpu_callback(&err_inject_cpu_notifier, CPU_ONLINE,
+ (void *)(long)i);
+ }
+
+ register_hotcpu_notifier(&err_inject_cpu_notifier);
+
+ return 0;
+}
+
+static void __exit
+err_inject_exit(void)
+{
+ int i;
+ struct sys_device *sys_dev;
+
+#ifdef ERR_INJ_DEBUG
+ printk(KERN_INFO "Exit error injection driver.\n");
+#endif
+ for_each_online_cpu(i) {
+ sys_dev = get_cpu_sysdev(i);
+ sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
+ }
+ unregister_hotcpu_notifier(&err_inject_cpu_notifier);
+}
+
+module_init(err_inject_init);
+module_exit(err_inject_exit);
+
+MODULE_AUTHOR("Fenghua Yu <fenghua.yu@intel.com>");
+MODULE_DESCRIPTION("MC error injection kenrel sysfs interface");
+MODULE_LICENSE("GPL");
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index 7a05b1cb2ad..8589e84a27c 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -10,6 +10,8 @@
* probably broke it along the way... ;-)
* 13-Jul-04 clameter Implement fsys_clock_gettime and revise fsys_gettimeofday to make
* it capable of using memory based clocks without falling back to C code.
+ * 08-Feb-07 Fenghua Yu Implement fsys_getcpu.
+ *
*/
#include <asm/asmmacro.h>
@@ -505,6 +507,59 @@ EX(.fail_efault, (p15) st8 [r34]=r3)
#endif
END(fsys_rt_sigprocmask)
+/*
+ * fsys_getcpu doesn't use the third parameter in this implementation. It reads
+ * current_thread_info()->cpu and corresponding node in cpu_to_node_map.
+ */
+ENTRY(fsys_getcpu)
+ .prologue
+ .altrp b6
+ .body
+ ;;
+ add r2=TI_FLAGS+IA64_TASK_SIZE,r16
+ tnat.nz p6,p0 = r32 // guard against NaT argument
+ add r3=TI_CPU+IA64_TASK_SIZE,r16
+ ;;
+ ld4 r3=[r3] // M r3 = thread_info->cpu
+ ld4 r2=[r2] // M r2 = thread_info->flags
+(p6) br.cond.spnt.few .fail_einval // B
+ ;;
+ tnat.nz p7,p0 = r33 // I guard against NaT argument
+(p7) br.cond.spnt.few .fail_einval // B
+#ifdef CONFIG_NUMA
+ movl r17=cpu_to_node_map
+ ;;
+EX(.fail_efault, probe.w.fault r32, 3) // M This takes 5 cycles
+EX(.fail_efault, probe.w.fault r33, 3) // M This takes 5 cycles
+ shladd r18=r3,1,r17
+ ;;
+ ld2 r20=[r18] // r20 = cpu_to_node_map[cpu]
+ and r2 = TIF_ALLWORK_MASK,r2
+ ;;
+ cmp.ne p8,p0=0,r2
+(p8) br.spnt.many fsys_fallback_syscall
+ ;;
+ ;;
+EX(.fail_efault, st4 [r32] = r3)
+EX(.fail_efault, st2 [r33] = r20)
+ mov r8=0
+ ;;
+#else
+EX(.fail_efault, probe.w.fault r32, 3) // M This takes 5 cycles
+EX(.fail_efault, probe.w.fault r33, 3) // M This takes 5 cycles
+ and r2 = TIF_ALLWORK_MASK,r2
+ ;;
+ cmp.ne p8,p0=0,r2
+(p8) br.spnt.many fsys_fallback_syscall
+ ;;
+EX(.fail_efault, st4 [r32] = r3)
+EX(.fail_efault, st2 [r33] = r0)
+ mov r8=0
+ ;;
+#endif
+ FSYS_RETURN
+END(fsys_getcpu)
+
ENTRY(fsys_fallback_syscall)
.prologue
.altrp b6
@@ -878,6 +933,56 @@ fsyscall_table:
data8 0 // timer_delete
data8 0 // clock_settime
data8 fsys_clock_gettime // clock_gettime
+ data8 0 // clock_getres // 1255
+ data8 0 // clock_nanosleep
+ data8 0 // fstatfs64
+ data8 0 // statfs64
+ data8 0 // mbind
+ data8 0 // get_mempolicy // 1260
+ data8 0 // set_mempolicy
+ data8 0 // mq_open
+ data8 0 // mq_unlink
+ data8 0 // mq_timedsend
+ data8 0 // mq_timedreceive // 1265
+ data8 0 // mq_notify
+ data8 0 // mq_getsetattr
+ data8 0 // kexec_load
+ data8 0 // vserver
+ data8 0 // waitid // 1270
+ data8 0 // add_key
+ data8 0 // request_key
+ data8 0 // keyctl
+ data8 0 // ioprio_set
+ data8 0 // ioprio_get // 1275
+ data8 0 // move_pages
+ data8 0 // inotify_init
+ data8 0 // inotify_add_watch
+ data8 0 // inotify_rm_watch
+ data8 0 // migrate_pages // 1280
+ data8 0 // openat
+ data8 0 // mkdirat
+ data8 0 // mknodat
+ data8 0 // fchownat
+ data8 0 // futimesat // 1285
+ data8 0 // newfstatat
+ data8 0 // unlinkat
+ data8 0 // renameat
+ data8 0 // linkat
+ data8 0 // symlinkat // 1290
+ data8 0 // readlinkat
+ data8 0 // fchmodat
+ data8 0 // faccessat
+ data8 0
+ data8 0 // 1295
+ data8 0 // unshare
+ data8 0 // splice
+ data8 0 // set_robust_list
+ data8 0 // get_robust_list
+ data8 0 // sync_file_range // 1300
+ data8 0 // tee
+ data8 0 // vmsplice
+ data8 0
+ data8 fsys_getcpu // getcpu // 1304
// fill in zeros for the remaining entries
.zero:
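
For reference, a hedged user-space sketch of exercising the new getcpu entry point wired up above. The 1304 slot is taken from the syscall tables in this patch; the raw syscall() form assumes no libc wrapper is available.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_getcpu
#define __NR_getcpu 1304	/* ia64 slot per the tables above */
#endif

int main(void)
{
	unsigned int cpu = 0, node = 0;

	/* third (cache) argument is ignored by the fsys implementation */
	if (syscall(__NR_getcpu, &cpu, &node, NULL) != 0) {
		perror("getcpu");
		return 1;
	}
	printf("running on cpu %u, node %u\n", cpu, node);
	return 0;
}
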
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 0fc5fb7865c..dcfbf3e7a9e 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -446,7 +446,7 @@ iosapic_end_level_irq (unsigned int irq)
#define iosapic_disable_level_irq mask_irq
#define iosapic_ack_level_irq nop
-struct hw_interrupt_type irq_type_iosapic_level = {
+struct irq_chip irq_type_iosapic_level = {
.name = "IO-SAPIC-level",
.startup = iosapic_startup_level_irq,
.shutdown = iosapic_shutdown_level_irq,
@@ -454,6 +454,8 @@ struct hw_interrupt_type irq_type_iosapic_level = {
.disable = iosapic_disable_level_irq,
.ack = iosapic_ack_level_irq,
.end = iosapic_end_level_irq,
+ .mask = mask_irq,
+ .unmask = unmask_irq,
.set_affinity = iosapic_set_affinity
};
@@ -493,7 +495,7 @@ iosapic_ack_edge_irq (unsigned int irq)
#define iosapic_disable_edge_irq nop
#define iosapic_end_edge_irq nop
-struct hw_interrupt_type irq_type_iosapic_edge = {
+struct irq_chip irq_type_iosapic_edge = {
.name = "IO-SAPIC-edge",
.startup = iosapic_startup_edge_irq,
.shutdown = iosapic_disable_edge_irq,
@@ -501,6 +503,8 @@ struct hw_interrupt_type irq_type_iosapic_edge = {
.disable = iosapic_disable_edge_irq,
.ack = iosapic_ack_edge_irq,
.end = iosapic_end_edge_irq,
+ .mask = mask_irq,
+ .unmask = unmask_irq,
.set_affinity = iosapic_set_affinity
};
@@ -925,6 +929,11 @@ iosapic_unregister_intr (unsigned int gsi)
/* Clear the interrupt controller descriptor */
idesc->chip = &no_irq_type;
+#ifdef CONFIG_SMP
+ /* Clear affinity */
+ cpus_setall(idesc->affinity);
+#endif
+
/* Clear the interrupt information */
memset(&iosapic_intr_info[vector], 0,
sizeof(struct iosapic_intr_info));
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index ba3ba8bc50b..456f57b087c 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -275,7 +275,7 @@ static struct irqaction ipi_irqaction = {
static struct irqaction resched_irqaction = {
.handler = dummy_handler,
- .flags = SA_INTERRUPT,
+ .flags = IRQF_DISABLED,
.name = "resched"
};
#endif
diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
index e2ccc9f660c..4f0f3b8c1ee 100644
--- a/arch/ia64/kernel/machine_kexec.c
+++ b/arch/ia64/kernel/machine_kexec.c
@@ -14,6 +14,7 @@
#include <linux/kexec.h>
#include <linux/cpu.h>
#include <linux/irq.h>
+#include <linux/efi.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/delay.h>
@@ -68,22 +69,10 @@ void machine_kexec_cleanup(struct kimage *image)
{
}
-void machine_shutdown(void)
-{
- int cpu;
-
- for_each_online_cpu(cpu) {
- if (cpu != smp_processor_id())
- cpu_down(cpu);
- }
- kexec_disable_iosapic();
-}
-
/*
* Do not allocate memory (or fail in any way) in machine_kexec().
* We are past the point of no return, committed to rebooting now.
*/
-extern void *efi_get_pal_addr(void);
static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
{
struct kimage *image = arg;
@@ -93,6 +82,7 @@ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
unsigned long vector;
int ii;
+ BUG_ON(!image);
if (image->type == KEXEC_TYPE_CRASH) {
crash_save_this_cpu();
current->thread.ksp = (__u64)info->sw - 16;
@@ -131,6 +121,7 @@ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
void machine_kexec(struct kimage *image)
{
+ BUG_ON(!image);
unw_init_running(ia64_machine_kexec, image);
for(;;);
}
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index a76add3e76a..491687f84fb 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1192,8 +1192,6 @@ void
ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
struct ia64_sal_os_state *sos)
{
- pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
- &sos->proc_state_param;
int recover, cpu = smp_processor_id();
struct task_struct *previous_current;
struct ia64_mca_notify_die nd =
@@ -1223,10 +1221,8 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
/* Get the MCA error record and log it */
ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
- /* TLB error is only exist in this SAL error record */
- recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc))
- /* other error recovery */
- || (ia64_mca_ucmc_extension
+ /* MCA error recovery */
+ recover = (ia64_mca_ucmc_extension
&& ia64_mca_ucmc_extension(
IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
sos));
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index afc1403799c..832cf1e647e 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -602,11 +602,40 @@ recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx,
default:
break;
}
+ } else if (psp->cc && !psp->bc) { /* Cache error */
+ status = recover_from_read_error(slidx, peidx, pbci, sos);
}
return status;
}
+/*
+ * recover_from_tlb_check
+ * @peidx: pointer of index of processor error section
+ *
+ * Return value:
+ * 1 on Success / 0 on Failure
+ */
+static int
+recover_from_tlb_check(peidx_table_t *peidx)
+{
+ sal_log_mod_error_info_t *smei;
+ pal_tlb_check_info_t *ptci;
+
+ smei = (sal_log_mod_error_info_t *)peidx_tlb_check(peidx, 0);
+ ptci = (pal_tlb_check_info_t *)&(smei->check_info);
+
+ /*
+ * Look for signature of a duplicate TLB DTC entry, which is
+ * a SW bug and always fatal.
+ */
+ if (ptci->op == PAL_TLB_CHECK_OP_PURGE
+ && !(ptci->itr || ptci->dtc || ptci->itc))
+ return fatal_mca("Duplicate TLB entry");
+
+ return mca_recovered("TLB check recovered");
+}
+
/**
* recover_from_processor_error
* @platform: whether there are some platform error section or not
@@ -618,13 +647,6 @@ recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx,
* Return value:
* 1 on Success / 0 on Failure
*/
-/*
- * Later we try to recover when below all conditions are satisfied.
- * 1. Only one processor error section is exist.
- * 2. BUS_CHECK is exist and the others are not exist.(Except TLB_CHECK)
- * 3. The entry of BUS_CHECK_INFO is 1.
- * 4. "External bus error" flag is set and the others are not set.
- */
static int
recover_from_processor_error(int platform, slidx_table_t *slidx,
@@ -652,38 +674,39 @@ recover_from_processor_error(int platform, slidx_table_t *slidx,
return fatal_mca("error not contained");
/*
+ * Look for recoverable TLB check
+ */
+ if (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc))
+ return recover_from_tlb_check(peidx);
+
+ /*
* The cache check and bus check bits have four possible states
* cc bc
- * 0 0 Weird record, not recovered
- * 1 0 Cache error, not recovered
- * 0 1 I/O error, attempt recovery
* 1 1 Memory error, attempt recovery
+ * 1 0 Cache error, attempt recovery
+ * 0 1 I/O error, attempt recovery
+ * 0 0 Other error type, not recovered
*/
- if (psp->bc == 0 || pbci == NULL)
- return fatal_mca("No bus check");
+ if (psp->cc == 0 && (psp->bc == 0 || pbci == NULL))
+ return fatal_mca("No cache or bus check");
/*
- * Sorry, we cannot handle so many.
+ * Cannot handle more than one bus check.
*/
if (peidx_bus_check_num(peidx) > 1)
return fatal_mca("Too many bus checks");
- /*
- * Well, here is only one bus error.
- */
+
if (pbci->ib)
return fatal_mca("Internal Bus error");
- if (pbci->cc)
- return fatal_mca("Cache-cache error");
if (pbci->eb && pbci->bsi > 0)
return fatal_mca("External bus check fatal status");
/*
- * This is a local MCA and estimated as recoverble external bus error.
- * (e.g. a load from poisoned memory)
- * This means "there are some platform errors".
+ * This is a local MCA and estimated as a recoverable error.
*/
if (platform)
return recover_from_platform_error(slidx, peidx, pbci, sos);
+
/*
* On account of strange SAL error record, we cannot recover.
*/
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 822e59a1b82..c81080df70d 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -60,18 +60,23 @@ static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
msg.address_lo = addr;
write_msi_msg(irq, &msg);
- set_native_irq_info(irq, cpu_mask);
+ irq_desc[irq].affinity = cpu_mask;
}
#endif /* CONFIG_SMP */
-int ia64_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
+int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
struct msi_msg msg;
unsigned long dest_phys_id;
- unsigned int vector;
+ int irq, vector;
+ irq = create_irq();
+ if (irq < 0)
+ return irq;
+
+ set_irq_msi(irq, desc);
dest_phys_id = cpu_physical_id(first_cpu(cpu_online_map));
- vector = irq;
+ vector = irq_to_vector(irq);
msg.address_hi = 0;
msg.address_lo =
@@ -89,12 +94,12 @@ int ia64_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
write_msi_msg(irq, &msg);
set_irq_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);
- return 0;
+ return irq;
}
void ia64_teardown_msi_irq(unsigned int irq)
{
- return; /* no-op */
+ destroy_irq(irq);
}
static void ia64_ack_msi_irq(unsigned int irq)
@@ -105,7 +110,7 @@ static void ia64_ack_msi_irq(unsigned int irq)
static int ia64_msi_retrigger_irq(unsigned int irq)
{
- unsigned int vector = irq;
+ unsigned int vector = irq_to_vector(irq);
ia64_resend_irq(vector);
return 1;
@@ -126,12 +131,12 @@ static struct irq_chip ia64_msi_chip = {
};
-int arch_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
+int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
if (platform_setup_msi_irq)
- return platform_setup_msi_irq(irq, pdev);
+ return platform_setup_msi_irq(pdev, desc);
- return ia64_setup_msi_irq(irq, pdev);
+ return ia64_setup_msi_irq(pdev, desc);
}
void arch_teardown_msi_irq(unsigned int irq)
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index aa94f60fa8e..abc7ad03588 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -147,7 +147,7 @@
* in UP:
* - we need to protect against PMU overflow interrupts (local_irq_disable)
*
- * spin_lock_irqsave()/spin_lock_irqrestore():
+ * spin_lock_irqsave()/spin_unlock_irqrestore():
* in SMP: local_irq_disable + spin_lock
* in UP : local_irq_disable
*
@@ -521,19 +521,57 @@ pfm_sysctl_t pfm_sysctl;
EXPORT_SYMBOL(pfm_sysctl);
static ctl_table pfm_ctl_table[]={
- {1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
- {2, "debug_ovfl", &pfm_sysctl.debug_ovfl, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
- {3, "fastctxsw", &pfm_sysctl.fastctxsw, sizeof(int), 0600, NULL, &proc_dointvec, NULL,},
- {4, "expert_mode", &pfm_sysctl.expert_mode, sizeof(int), 0600, NULL, &proc_dointvec, NULL,},
- { 0, },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "debug",
+ .data = &pfm_sysctl.debug,
+ .maxlen = sizeof(int),
+ .mode = 0666,
+ .proc_handler = &proc_dointvec,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "debug_ovfl",
+ .data = &pfm_sysctl.debug_ovfl,
+ .maxlen = sizeof(int),
+ .mode = 0666,
+ .proc_handler = &proc_dointvec,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "fastctxsw",
+ .data = &pfm_sysctl.fastctxsw,
+ .maxlen = sizeof(int),
+ .mode = 0600,
+ .proc_handler = &proc_dointvec,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "expert_mode",
+ .data = &pfm_sysctl.expert_mode,
+ .maxlen = sizeof(int),
+ .mode = 0600,
+ .proc_handler = &proc_dointvec,
+ },
+ {}
};
static ctl_table pfm_sysctl_dir[] = {
- {1, "perfmon", NULL, 0, 0755, pfm_ctl_table, },
- {0,},
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "perfmon",
+ .mode = 0755,
+ .child = pfm_ctl_table,
+ },
+ {}
};
static ctl_table pfm_sysctl_root[] = {
- {1, "kernel", NULL, 0, 0755, pfm_sysctl_dir, },
- {0,},
+ {
+ .ctl_name = CTL_KERN,
+ .procname = "kernel",
+ .mode = 0755,
+ .child = pfm_sysctl_dir,
+ },
+ {}
};
static struct ctl_table_header *pfm_sysctl_header;
@@ -621,7 +659,7 @@ EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);
/* forward declaration */
-static struct file_operations pfm_file_ops;
+static const struct file_operations pfm_file_ops;
/*
* forward declarations
@@ -2126,7 +2164,7 @@ pfm_no_open(struct inode *irrelevant, struct file *dontcare)
-static struct file_operations pfm_file_ops = {
+static const struct file_operations pfm_file_ops = {
.llseek = no_llseek,
.read = pfm_read,
.write = pfm_write,
@@ -2261,7 +2299,7 @@ pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long ad
* allocate a sampling buffer and remaps it into the user address space of the task
*/
static int
-pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
+pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
{
struct mm_struct *mm = task->mm;
struct vm_area_struct *vma = NULL;
@@ -2301,17 +2339,17 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon
DPRINT(("smpl_buf @%p\n", smpl_buf));
/* allocate vma */
- vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (!vma) {
DPRINT(("Cannot allocate vma\n"));
goto error_kmem;
}
- memset(vma, 0, sizeof(*vma));
/*
* partially initialize the vma for the sampling buffer
*/
vma->vm_mm = mm;
+ vma->vm_file = filp;
vma->vm_flags = VM_READ| VM_MAYREAD |VM_RESERVED;
vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */
@@ -2350,6 +2388,8 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon
goto error;
}
+ get_file(filp);
+
/*
* now insert the vma in the vm list for the process, must be
* done with mmap lock held
@@ -2427,7 +2467,7 @@ pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
}
static int
-pfm_setup_buffer_fmt(struct task_struct *task, pfm_context_t *ctx, unsigned int ctx_flags,
+pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned int ctx_flags,
unsigned int cpu, pfarg_context_t *arg)
{
pfm_buffer_fmt_t *fmt = NULL;
@@ -2468,7 +2508,7 @@ pfm_setup_buffer_fmt(struct task_struct *task, pfm_context_t *ctx, unsigned int
/*
* buffer is always remapped into the caller's address space
*/
- ret = pfm_smpl_buffer_alloc(current, ctx, size, &uaddr);
+ ret = pfm_smpl_buffer_alloc(current, filp, ctx, size, &uaddr);
if (ret) goto error;
/* keep track of user address of buffer */
@@ -2679,7 +2719,7 @@ pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
* does the user want to sample?
*/
if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
- ret = pfm_setup_buffer_fmt(current, ctx, ctx_flags, 0, req);
+ ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req);
if (ret) goto buffer_error;
}
@@ -6597,7 +6637,7 @@ found:
return 0;
}
-static struct file_operations pfm_proc_fops = {
+static const struct file_operations pfm_proc_fops = {
.open = pfm_proc_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -6689,7 +6729,7 @@ pfm_init(void)
/*
* create /proc/sys/kernel/perfmon (for debugging purposes)
*/
- pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root, 0);
+ pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root);
/*
* initialize all our spinlocks
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 17685abaf49..ae96d417699 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -34,6 +34,7 @@
#include <asm/ia32.h>
#include <asm/irq.h>
#include <asm/kdebug.h>
+#include <asm/kexec.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sal.h>
@@ -803,6 +804,21 @@ cpu_halt (void)
ia64_pal_halt(min_power_state);
}
+void machine_shutdown(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+ if (cpu != smp_processor_id())
+ cpu_down(cpu);
+ }
+#endif
+#ifdef CONFIG_KEXEC
+ kexec_disable_iosapic();
+#endif
+}
+
void
machine_restart (char *restart_cmd)
{
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index aa705e46b97..00f80324694 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -607,7 +607,7 @@ find_thread_for_addr (struct task_struct *child, unsigned long addr)
*/
list_for_each_safe(this, next, &current->children) {
p = list_entry(this, struct task_struct, sibling);
- if (p->mm != mm)
+ if (p->tgid != child->tgid)
continue;
if (thread_matches(p, addr)) {
child = p;
@@ -1405,6 +1405,7 @@ ptrace_disable (struct task_struct *child)
struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
/* make sure the single step/taken-branch trap bits are not set: */
+ clear_tsk_thread_flag(child, TIF_SINGLESTEP);
child_psr->ss = 0;
child_psr->tb = 0;
}
@@ -1525,6 +1526,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
* Make sure the single step/taken-branch trap bits
* are not set:
*/
+ clear_tsk_thread_flag(child, TIF_SINGLESTEP);
ia64_psr(pt)->ss = 0;
ia64_psr(pt)->tb = 0;
@@ -1556,6 +1558,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
goto out_tsk;
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ set_tsk_thread_flag(child, TIF_SINGLESTEP);
if (request == PTRACE_SINGLESTEP) {
ia64_psr(pt)->ss = 1;
} else {
@@ -1570,6 +1573,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
case PTRACE_DETACH:
/* detach a process that was attached. */
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
ret = ptrace_detach(child, data);
goto out_tsk;
@@ -1595,13 +1599,9 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
}
-void
+static void
syscall_trace (void)
{
- if (!test_thread_flag(TIF_SYSCALL_TRACE))
- return;
- if (!(current->ptrace & PT_PTRACED))
- return;
/*
* The 0x80 provides a way for the tracing parent to
* distinguish between a syscall stop and SIGTRAP delivery.
@@ -1664,7 +1664,8 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
audit_syscall_exit(success, result);
}
- if (test_thread_flag(TIF_SYSCALL_TRACE)
+ if ((test_thread_flag(TIF_SYSCALL_TRACE)
+ || test_thread_flag(TIF_SINGLESTEP))
&& (current->ptrace & PT_PTRACED))
syscall_trace();
}
diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c
index 20bad78b507..37c876f95db 100644
--- a/arch/ia64/kernel/sal.c
+++ b/arch/ia64/kernel/sal.c
@@ -194,9 +194,8 @@ static void __init
chk_nointroute_opt(void)
{
char *cp;
- extern char saved_command_line[];
- for (cp = saved_command_line; *cp; ) {
+ for (cp = boot_command_line; *cp; ) {
if (memcmp(cp, "nointroute", 10) == 0) {
no_int_routing = 1;
printk ("no_int_routing on\n");
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index e375a2f0f2c..af9f8754d84 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -352,7 +352,7 @@ retry:
return size;
}
-static struct file_operations salinfo_event_fops = {
+static const struct file_operations salinfo_event_fops = {
.open = salinfo_event_open,
.read = salinfo_event_read,
};
@@ -568,7 +568,7 @@ salinfo_log_write(struct file *file, const char __user *buffer, size_t count, lo
return count;
}
-static struct file_operations salinfo_data_fops = {
+static const struct file_operations salinfo_data_fops = {
.open = salinfo_log_open,
.release = salinfo_log_release,
.read = salinfo_log_read,
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index f167b89f24e..6e19da122ae 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -90,8 +90,6 @@ static struct resource code_resource = {
.name = "Kernel code",
.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
-extern void efi_initialize_iomem_resources(struct resource *,
- struct resource *);
extern char _text[], _end[], _etext[];
unsigned long ia64_max_cacheline_size;
@@ -250,6 +248,12 @@ reserve_memory (void)
}
#endif
+#ifdef CONFIG_PROC_VMCORE
+ if (reserve_elfcorehdr(&rsvd_region[n].start,
+ &rsvd_region[n].end) == 0)
+ n++;
+#endif
+
efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
n++;
@@ -261,7 +265,7 @@ reserve_memory (void)
* appropriate after a kernel panic.
*/
{
- char *from = strstr(saved_command_line, "crashkernel=");
+ char *from = strstr(boot_command_line, "crashkernel=");
unsigned long base, size;
if (from) {
size = memparse(from + 12, &from);
@@ -452,6 +456,30 @@ static int __init parse_elfcorehdr(char *arg)
return 0;
}
early_param("elfcorehdr", parse_elfcorehdr);
+
+int __init reserve_elfcorehdr(unsigned long *start, unsigned long *end)
+{
+ unsigned long length;
+
+ /* We get the address using the kernel command line,
+ * but the size is extracted from the EFI tables.
+ * Both address and size are required for reservation
+ * to work properly.
+ */
+
+ if (elfcorehdr_addr >= ELFCORE_ADDR_MAX)
+ return -EINVAL;
+
+ if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) {
+ elfcorehdr_addr = ELFCORE_ADDR_MAX;
+ return -EINVAL;
+ }
+
+ *start = (unsigned long)__va(elfcorehdr_addr);
+ *end = *start + length;
+ return 0;
+}
+
#endif /* CONFIG_PROC_VMCORE */
void __init
@@ -462,7 +490,7 @@ setup_arch (char **cmdline_p)
ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
*cmdline_p = __va(ia64_boot_param->command_line);
- strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);
+ strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
efi_init();
io_port_init();
@@ -568,34 +596,31 @@ show_cpuinfo (struct seq_file *m, void *v)
{ 1UL << 1, "spontaneous deferral"},
{ 1UL << 2, "16-byte atomic ops" }
};
- char features[128], *cp, sep;
+ char features[128], *cp, *sep;
struct cpuinfo_ia64 *c = v;
unsigned long mask;
unsigned long proc_freq;
- int i;
+ int i, size;
mask = c->features;
/* build the feature string: */
- memcpy(features, " standard", 10);
+ memcpy(features, "standard", 9);
cp = features;
- sep = 0;
- for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) {
+ size = sizeof(features);
+ sep = "";
+ for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) {
if (mask & feature_bits[i].mask) {
- if (sep)
- *cp++ = sep;
- sep = ',';
- *cp++ = ' ';
- strcpy(cp, feature_bits[i].feature_name);
- cp += strlen(feature_bits[i].feature_name);
+ cp += snprintf(cp, size, "%s%s", sep,
+ feature_bits[i].feature_name),
+ sep = ", ";
mask &= ~feature_bits[i].mask;
+ size = sizeof(features) - (cp - features);
}
}
- if (mask) {
- /* print unknown features as a hex value: */
- if (sep)
- *cp++ = sep;
- sprintf(cp, " 0x%lx", mask);
+ if (mask && size > 1) {
+ /* print unknown features as a hex value */
+ snprintf(cp, size, "%s0x%lx", sep, mask);
}
proc_freq = cpufreq_quick_get(cpunum);
@@ -611,10 +636,10 @@ show_cpuinfo (struct seq_file *m, void *v)
"model name : %s\n"
"revision : %u\n"
"archrev : %u\n"
- "features :%s\n" /* don't change this---it _is_ right! */
+ "features : %s\n"
"cpu number : %lu\n"
"cpu regs : %u\n"
- "cpu MHz : %lu.%06lu\n"
+ "cpu MHz : %lu.%03lu\n"
"itc MHz : %lu.%06lu\n"
"BogoMIPS : %lu.%02lu\n",
cpunum, c->vendor, c->family, c->model,
@@ -666,12 +691,15 @@ struct seq_operations cpuinfo_op = {
.show = show_cpuinfo
};
-static char brandname[128];
+#define MAX_BRANDS 8
+static char brandname[MAX_BRANDS][128];
static char * __cpuinit
get_model_name(__u8 family, __u8 model)
{
+ static int overflow;
char brand[128];
+ int i;
memcpy(brand, "Unknown", 8);
if (ia64_pal_get_brand_info(brand)) {
@@ -683,12 +711,17 @@ get_model_name(__u8 family, __u8 model)
case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
}
}
- if (brandname[0] == '\0')
- return strcpy(brandname, brand);
- else if (strcmp(brandname, brand) == 0)
- return brandname;
- else
- return kstrdup(brand, GFP_KERNEL);
+ for (i = 0; i < MAX_BRANDS; i++)
+ if (strcmp(brandname[i], brand) == 0)
+ return brandname[i];
+ for (i = 0; i < MAX_BRANDS; i++)
+ if (brandname[i][0] == '\0')
+ return strcpy(brandname[i], brand);
+ if (overflow++ == 0)
+ printk(KERN_ERR
+ "%s: Table overflow. Some processor model information will be missing\n",
+ __FUNCTION__);
+ return "Unknown";
}
static void __cpuinit
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index f4c7f7769cf..55ddd809b02 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -221,13 +221,13 @@ send_IPI_self (int op)
#ifdef CONFIG_KEXEC
void
-kdump_smp_send_stop()
+kdump_smp_send_stop(void)
{
send_IPI_allbutself(IPI_KDUMP_CPU_STOP);
}
void
-kdump_smp_send_init()
+kdump_smp_send_init(void)
{
unsigned int cpu, self_cpu;
self_cpu = smp_processor_id();
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index b21ddecea94..ff7df439da6 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -375,6 +375,7 @@ static void __devinit
smp_callin (void)
{
int cpuid, phys_id, itc_master;
+ struct cpuinfo_ia64 *last_cpuinfo, *this_cpuinfo;
extern void ia64_init_itm(void);
extern volatile int time_keeper_id;
@@ -424,7 +425,21 @@ smp_callin (void)
* Get our bogomips.
*/
ia64_init_itm();
- calibrate_delay();
+
+ /*
+ * Delay calibration can be skipped if the new processor is identical to the
+ * previous processor.
+ */
+ last_cpuinfo = cpu_data(cpuid - 1);
+ this_cpuinfo = local_cpu_data;
+ if (last_cpuinfo->itc_freq != this_cpuinfo->itc_freq ||
+ last_cpuinfo->proc_freq != this_cpuinfo->proc_freq ||
+ last_cpuinfo->features != this_cpuinfo->features ||
+ last_cpuinfo->revision != this_cpuinfo->revision ||
+ last_cpuinfo->family != this_cpuinfo->family ||
+ last_cpuinfo->archrev != this_cpuinfo->archrev ||
+ last_cpuinfo->model != this_cpuinfo->model)
+ calibrate_delay();
local_cpu_data->loops_per_jiffy = loops_per_jiffy;
#ifdef CONFIG_IA32_SUPPORT
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index ab684747036..765cbe5ba6a 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -24,8 +24,6 @@
#include <asm/uaccess.h>
#include <asm/kdebug.h>
-extern spinlock_t timerlist_lock;
-
fpswa_interface_t *fpswa_interface;
EXPORT_SYMBOL(fpswa_interface);
@@ -53,34 +51,6 @@ trap_init (void)
fpswa_interface = __va(ia64_boot_param->fpswa);
}
-/*
- * Unlock any spinlocks which will prevent us from getting the message out (timerlist_lock
- * is acquired through the console unblank code)
- */
-void
-bust_spinlocks (int yes)
-{
- int loglevel_save = console_loglevel;
-
- if (yes) {
- oops_in_progress = 1;
- return;
- }
-
-#ifdef CONFIG_VT
- unblank_screen();
-#endif
- oops_in_progress = 0;
- /*
- * OK, the message is on the console. Now we call printk() without
- * oops_in_progress set so that printk will give klogd a poke. Hold onto
- * your hats...
- */
- console_loglevel = 15; /* NMI oopser may have shut the console up */
- printk(" ");
- console_loglevel = loglevel_save;
-}
-
void
die (const char *str, struct pt_regs *regs, long err)
{
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index d9599dcac78..69238264211 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -118,12 +118,14 @@ SECTIONS
.init.data : AT(ADDR(.init.data) - LOAD_OFFSET)
{ *(.init.data) }
+#ifdef CONFIG_BLK_DEV_INITRD
.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET)
{
__initramfs_start = .;
*(.init.ramfs)
__initramfs_end = .;
}
+#endif
. = ALIGN(16);
.init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET)
@@ -164,6 +166,7 @@ SECTIONS
}
#endif
+ . = ALIGN(8);
__con_initcall_start = .;
.con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET)
{ *(.con_initcall.init) }