Diffstat (limited to 'arch/ia64/kernel')

-rw-r--r--  arch/ia64/kernel/entry.S        |   1
-rw-r--r--  arch/ia64/kernel/gate.lds.S     |   1
-rw-r--r--  arch/ia64/kernel/iosapic.c      | 265
-rw-r--r--  arch/ia64/kernel/palinfo.c      |   8
-rw-r--r--  arch/ia64/kernel/time.c         |   2
-rw-r--r--  arch/ia64/kernel/topology.c     | 367
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S  |  18

7 files changed, 545 insertions, 117 deletions
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 0e3eda99e54..750e8e7fbdc 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1605,5 +1605,6 @@ sys_call_table:
 	data8 sys_ni_syscall			// reserved for pselect
 	data8 sys_ni_syscall			// 1295 reserved for ppoll
 	data8 sys_unshare
+	data8 sys_splice
 	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
diff --git a/arch/ia64/kernel/gate.lds.S b/arch/ia64/kernel/gate.lds.S
index e1e4aba9ecd..7c99e6ec3da 100644
--- a/arch/ia64/kernel/gate.lds.S
+++ b/arch/ia64/kernel/gate.lds.S
@@ -59,6 +59,7 @@ SECTIONS
 	  *(.dynbss)
 	  *(.bss .bss.* .gnu.linkonce.b.*)
 	  *(__ex_table)
+	  *(__mca_table)
 	}
 }
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 8832c553230..7956eb9058f 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -9,54 +9,65 @@
  * Copyright (C) 1999 VA Linux Systems
  * Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com>
  *
- * 00/04/19	D. Mosberger	Rewritten to mirror more closely the x86 I/O APIC code.
- *				In particular, we now have separate handlers for edge
- *				and level triggered interrupts.
- * 00/10/27	Asit Mallick, Goutham Rao <goutham.rao@intel.com> IRQ vector allocation
- *				PCI to vector mapping, shared PCI interrupts.
- * 00/10/27	D. Mosberger	Document things a bit more to make them more understandable.
- *				Clean up much of the old IOSAPIC cruft.
- * 01/07/27	J.I. Lee	PCI irq routing, Platform/Legacy interrupts and fixes for
- *				ACPI S5(SoftOff) support.
+ * 00/04/19	D. Mosberger	Rewritten to mirror more closely the x86 I/O
+ *				APIC code. In particular, we now have separate
+ *				handlers for edge and level triggered
+ *				interrupts.
+ * 00/10/27	Asit Mallick, Goutham Rao <goutham.rao@intel.com> IRQ vector
+ *				allocation PCI to vector mapping, shared PCI
+ *				interrupts.
+ * 00/10/27	D. Mosberger	Document things a bit more to make them more
+ *				understandable. Clean up much of the old
+ *				IOSAPIC cruft.
+ * 01/07/27	J.I. Lee	PCI irq routing, Platform/Legacy interrupts
+ *				and fixes for ACPI S5(SoftOff) support.
  * 02/01/23	J.I. Lee	iosapic pgm fixes for PCI irq routing from _PRT
- * 02/01/07	E. Focht	<efocht@ess.nec.de> Redirectable interrupt vectors in
- *				iosapic_set_affinity(), initializations for
- *				/proc/irq/#/smp_affinity
+ * 02/01/07	E. Focht	<efocht@ess.nec.de> Redirectable interrupt
+ *				vectors in iosapic_set_affinity(),
+ *				initializations for /proc/irq/#/smp_affinity
  * 02/04/02	P. Diefenbaugh	Cleaned up ACPI PCI IRQ routing.
  * 02/04/18	J.I. Lee	bug fix in iosapic_init_pci_irq
- * 02/04/30	J.I. Lee	bug fix in find_iosapic to fix ACPI PCI IRQ to IOSAPIC mapping
- *				error
+ * 02/04/30	J.I. Lee	bug fix in find_iosapic to fix ACPI PCI IRQ to
+ *				IOSAPIC mapping error
  * 02/07/29	T. Kochi	Allocate interrupt vectors dynamically
- * 02/08/04	T. Kochi	Cleaned up terminology (irq, global system interrupt, vector, etc.)
- * 02/09/20	D. Mosberger	Simplified by taking advantage of ACPI's pci_irq code.
+ * 02/08/04	T. Kochi	Cleaned up terminology (irq, global system
+ *				interrupt, vector, etc.)
+ * 02/09/20	D. Mosberger	Simplified by taking advantage of ACPI's
+ *				pci_irq code.
  * 03/02/19	B. Helgaas	Make pcat_compat system-wide, not per-IOSAPIC.
- *				Remove iosapic_address & gsi_base from external interfaces.
- *				Rationalize __init/__devinit attributes.
+ *				Remove iosapic_address & gsi_base from
+ *				external interfaces. Rationalize
+ *				__init/__devinit attributes.
  * 04/12/04 Ashok Raj	<ashok.raj@intel.com> Intel Corporation 2004
- *		Updated to work with irq migration necessary for CPU Hotplug
+ *		Updated to work with irq migration necessary
+ *		for CPU Hotplug
  */

 /*
- * Here is what the interrupt logic between a PCI device and the kernel looks like:
+ * Here is what the interrupt logic between a PCI device and the kernel looks
+ * like:
  *
- * (1) A PCI device raises one of the four interrupt pins (INTA, INTB, INTC, INTD).  The
- *     device is uniquely identified by its bus--, and slot-number (the function
- *     number does not matter here because all functions share the same interrupt
- *     lines).
+ * (1) A PCI device raises one of the four interrupt pins (INTA, INTB, INTC,
+ *     INTD). The device is uniquely identified by its bus-, and slot-number
+ *     (the function number does not matter here because all functions share
+ *     the same interrupt lines).
  *
- * (2) The motherboard routes the interrupt line to a pin on a IOSAPIC controller.
- *     Multiple interrupt lines may have to share the same IOSAPIC pin (if they're level
- *     triggered and use the same polarity).  Each interrupt line has a unique Global
- *     System Interrupt (GSI) number which can be calculated as the sum of the controller's
- *     base GSI number and the IOSAPIC pin number to which the line connects.
+ * (2) The motherboard routes the interrupt line to a pin on a IOSAPIC
+ *     controller. Multiple interrupt lines may have to share the same
+ *     IOSAPIC pin (if they're level triggered and use the same polarity).
+ *     Each interrupt line has a unique Global System Interrupt (GSI) number
+ *     which can be calculated as the sum of the controller's base GSI number
+ *     and the IOSAPIC pin number to which the line connects.
  *
- * (3) The IOSAPIC uses an internal routing table entries (RTEs) to map the IOSAPIC pin
- *     into the IA-64 interrupt vector.  This interrupt vector is then sent to the CPU.
+ * (3) The IOSAPIC uses an internal routing table entries (RTEs) to map the
+ *     IOSAPIC pin into the IA-64 interrupt vector. This interrupt vector is then
+ *     sent to the CPU.
  *
- * (4) The kernel recognizes an interrupt as an IRQ.  The IRQ interface is used as
- *     architecture-independent interrupt handling mechanism in Linux.  As an
- *     IRQ is a number, we have to have IA-64 interrupt vector number <-> IRQ number
- *     mapping.  On smaller systems, we use one-to-one mapping between IA-64 vector and
- *     IRQ.  A platform can implement platform_irq_to_vector(irq) and
+ * (4) The kernel recognizes an interrupt as an IRQ. The IRQ interface is
+ *     used as architecture-independent interrupt handling mechanism in Linux.
+ *     As an IRQ is a number, we have to have
+ *     IA-64 interrupt vector number <-> IRQ number mapping. On smaller
+ *     systems, we use one-to-one mapping between IA-64 vector and IRQ. A
+ *     platform can implement platform_irq_to_vector(irq) and
  *     platform_local_vector_to_irq(vector) APIs to differentiate the mapping.
  *     Please see also include/asm-ia64/hw_irq.h for those APIs.
  *
@@ -64,9 +75,9 @@
  *
  * PCI pin -> global system interrupt (GSI) -> IA-64 vector <-> IRQ
  *
- * Note: The term "IRQ" is loosely used everywhere in Linux kernel to describe interrupts.
- * Now we use "IRQ" only for Linux IRQ's.  ISA IRQ (isa_irq) is the only exception in this
- * source code.
+ * Note: The term "IRQ" is loosely used everywhere in Linux kernel to
+ * describe interrupts. Now we use "IRQ" only for Linux IRQ's. ISA IRQ
+ * (isa_irq) is the only exception in this source code.
  */

 #include <linux/config.h>
@@ -90,7 +101,6 @@
 #include <asm/ptrace.h>
 #include <asm/system.h>

-
 #undef DEBUG_INTERRUPT_ROUTING

 #ifdef DEBUG_INTERRUPT_ROUTING
@@ -99,36 +109,46 @@
 #define DBG(fmt...)
 #endif

-#define NR_PREALLOCATE_RTE_ENTRIES	(PAGE_SIZE / sizeof(struct iosapic_rte_info))
+#define NR_PREALLOCATE_RTE_ENTRIES \
+	(PAGE_SIZE / sizeof(struct iosapic_rte_info))
 #define RTE_PREALLOCATED	(1)

 static DEFINE_SPINLOCK(iosapic_lock);

-/* These tables map IA-64 vectors to the IOSAPIC pin that generates this vector. */
+/*
+ * These tables map IA-64 vectors to the IOSAPIC pin that generates this
+ * vector.
+ */

 struct iosapic_rte_info {
-	struct list_head rte_list;	/* node in list of RTEs sharing the same vector */
+	struct list_head rte_list;	/* node in list of RTEs sharing the
+					 * same vector */
 	char __iomem	*addr;		/* base address of IOSAPIC */
-	unsigned int	gsi_base;	/* first GSI assigned to this IOSAPIC */
+	unsigned int	gsi_base;	/* first GSI assigned to this
+					 * IOSAPIC */
 	char		rte_index;	/* IOSAPIC RTE index */
 	int		refcnt;		/* reference counter */
 	unsigned int	flags;		/* flags */
 } ____cacheline_aligned;

 static struct iosapic_intr_info {
-	struct list_head rtes;		/* RTEs using this vector (empty => not an IOSAPIC interrupt) */
+	struct list_head rtes;		/* RTEs using this vector (empty =>
+					 * not an IOSAPIC interrupt) */
 	int		count;		/* # of RTEs that shares this vector */
-	u32		low32;		/* current value of low word of Redirection table entry */
+	u32		low32;		/* current value of low word of
+					 * Redirection table entry */
 	unsigned int	dest;		/* destination CPU physical ID */
 	unsigned char	dmode	: 3;	/* delivery mode (see iosapic.h) */
-	unsigned char	polarity: 1;	/* interrupt polarity (see iosapic.h) */
+	unsigned char	polarity: 1;	/* interrupt polarity
+					 * (see iosapic.h) */
 	unsigned char	trigger	: 1;	/* trigger mode (see iosapic.h) */
 } iosapic_intr_info[IA64_NUM_VECTORS];

 static struct iosapic {
 	char __iomem	*addr;		/* base address of IOSAPIC */
-	unsigned int	gsi_base;	/* first GSI assigned to this IOSAPIC */
-	unsigned short	num_rte;	/* number of RTE in this IOSAPIC */
+	unsigned int	gsi_base;	/* first GSI assigned to this
+					 * IOSAPIC */
+	unsigned short	num_rte;	/* # of RTEs on this IOSAPIC */
 	int		rtes_inuse;	/* # of RTEs in use on this IOSAPIC */
 #ifdef CONFIG_NUMA
 	unsigned short	node;		/* numa node association via pxm */
@@ -149,7 +169,8 @@ find_iosapic (unsigned int gsi)
 	int i;

 	for (i = 0; i < NR_IOSAPICS; i++) {
-		if ((unsigned) (gsi - iosapic_lists[i].gsi_base) < iosapic_lists[i].num_rte)
+		if ((unsigned) (gsi - iosapic_lists[i].gsi_base) <
+		    iosapic_lists[i].num_rte)
 			return i;
 	}

@@ -162,7 +183,8 @@ _gsi_to_vector (unsigned int gsi)
 	struct iosapic_intr_info *info;
 	struct iosapic_rte_info *rte;

-	for (info = iosapic_intr_info; info < iosapic_intr_info + IA64_NUM_VECTORS; ++info)
+	for (info = iosapic_intr_info; info <
+	     iosapic_intr_info + IA64_NUM_VECTORS; ++info)
 		list_for_each_entry(rte, &info->rtes, rte_list)
 			if (rte->gsi_base + rte->rte_index == gsi)
 				return info - iosapic_intr_info;
@@ -185,8 +207,8 @@ gsi_to_irq (unsigned int gsi)
 	unsigned long flags;
 	int irq;
 	/*
-	 * XXX fix me: this assumes an identity mapping vetween IA-64 vector and Linux irq
-	 * numbers...
+	 * XXX fix me: this assumes an identity mapping between IA-64 vector
+	 * and Linux irq numbers...
 	 */
 	spin_lock_irqsave(&iosapic_lock, flags);
 	{
@@ -197,7 +219,8 @@ gsi_to_irq (unsigned int gsi)
 	return irq;
 }

-static struct iosapic_rte_info *gsi_vector_to_rte(unsigned int gsi, unsigned int vec)
+static struct iosapic_rte_info *gsi_vector_to_rte(unsigned int gsi,
+						  unsigned int vec)
 {
 	struct iosapic_rte_info *rte;

@@ -237,7 +260,9 @@ set_rte (unsigned int gsi, unsigned int vector, unsigned int dest, int mask)
 		for (irq = 0; irq < NR_IRQS; ++irq)
 			if (irq_to_vector(irq) == vector) {
-				set_irq_affinity_info(irq, (int)(dest & 0xffff), redir);
+				set_irq_affinity_info(irq,
+						      (int)(dest & 0xffff),
+						      redir);
 				break;
 			}
 	}
@@ -259,7 +284,7 @@ set_rte (unsigned int gsi, unsigned int vector, unsigned int dest, int mask)
 }

 static void
-nop (unsigned int vector)
+nop (unsigned int irq)
 {
 	/* do nothing... */
 }
@@ -281,7 +306,8 @@ mask_irq (unsigned int irq)
 	{
 		/* set only the mask bit */
 		low32 = iosapic_intr_info[vec].low32 |= IOSAPIC_MASK;
-		list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) {
+		list_for_each_entry(rte, &iosapic_intr_info[vec].rtes,
+				    rte_list) {
 			addr = rte->addr;
 			rte_index = rte->rte_index;
 			iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
@@ -306,7 +332,8 @@ unmask_irq (unsigned int irq)
 	spin_lock_irqsave(&iosapic_lock, flags);
 	{
 		low32 = iosapic_intr_info[vec].low32 &= ~IOSAPIC_MASK;
-		list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) {
+		list_for_each_entry(rte, &iosapic_intr_info[vec].rtes,
+				    rte_list) {
 			addr = rte->addr;
 			rte_index = rte->rte_index;
 			iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
@@ -346,21 +373,25 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
 	spin_lock_irqsave(&iosapic_lock, flags);
 	{
-		low32 = iosapic_intr_info[vec].low32 & ~(7 << IOSAPIC_DELIVERY_SHIFT);
+		low32 = iosapic_intr_info[vec].low32 &
+			~(7 << IOSAPIC_DELIVERY_SHIFT);

 		if (redir)
 			/* change delivery mode to lowest priority */
-			low32 |= (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
+			low32 |= (IOSAPIC_LOWEST_PRIORITY <<
+				  IOSAPIC_DELIVERY_SHIFT);
 		else
 			/* change delivery mode to fixed */
 			low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT);

 		iosapic_intr_info[vec].low32 = low32;
 		iosapic_intr_info[vec].dest = dest;
-		list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) {
+		list_for_each_entry(rte, &iosapic_intr_info[vec].rtes,
+				    rte_list) {
 			addr = rte->addr;
 			rte_index = rte->rte_index;
-			iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index), high32);
+			iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index),
+				      high32);
 			iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
 		}
 	}
@@ -433,7 +464,8 @@ iosapic_ack_edge_irq (unsigned int irq)
 	 * interrupt for real. This prevents IRQ storms from unhandled
 	 * devices.
 	 */
-	if ((idesc->status & (IRQ_PENDING|IRQ_DISABLED)) == (IRQ_PENDING|IRQ_DISABLED))
+	if ((idesc->status & (IRQ_PENDING|IRQ_DISABLED)) ==
+	    (IRQ_PENDING|IRQ_DISABLED))
 		mask_irq(irq);
 }

@@ -467,7 +499,8 @@ iosapic_version (char __iomem *addr)
 	return iosapic_read(addr, IOSAPIC_VERSION);
 }

-static int iosapic_find_sharable_vector (unsigned long trigger, unsigned long pol)
+static int iosapic_find_sharable_vector (unsigned long trigger,
+					 unsigned long pol)
 {
 	int i, vector = -1, min_count = -1;
 	struct iosapic_intr_info *info;
@@ -482,7 +515,8 @@ static int iosapic_find_sharable_vector (unsigned long trigger, unsigned long po
 	for (i = IA64_FIRST_DEVICE_VECTOR; i <= IA64_LAST_DEVICE_VECTOR; i++) {
 		info = &iosapic_intr_info[i];
 		if (info->trigger == trigger && info->polarity == pol &&
-		    (info->dmode == IOSAPIC_FIXED || info->dmode == IOSAPIC_LOWEST_PRIORITY)) {
+		    (info->dmode == IOSAPIC_FIXED || info->dmode ==
+		     IOSAPIC_LOWEST_PRIORITY)) {
 			if (min_count == -1 || info->count < min_count) {
 				vector = i;
 				min_count = info->count;
@@ -506,12 +540,15 @@ iosapic_reassign_vector (int vector)
 		new_vector = assign_irq_vector(AUTO_ASSIGN);
 		if (new_vector < 0)
 			panic("%s: out of interrupt vectors!\n", __FUNCTION__);
-		printk(KERN_INFO "Reassigning vector %d to %d\n", vector, new_vector);
+		printk(KERN_INFO "Reassigning vector %d to %d\n",
+		       vector, new_vector);
 		memcpy(&iosapic_intr_info[new_vector], &iosapic_intr_info[vector],
 		       sizeof(struct iosapic_intr_info));
 		INIT_LIST_HEAD(&iosapic_intr_info[new_vector].rtes);
-		list_move(iosapic_intr_info[vector].rtes.next, &iosapic_intr_info[new_vector].rtes);
-		memset(&iosapic_intr_info[vector], 0, sizeof(struct iosapic_intr_info));
+		list_move(iosapic_intr_info[vector].rtes.next,
+			  &iosapic_intr_info[new_vector].rtes);
+		memset(&iosapic_intr_info[vector], 0,
+		       sizeof(struct iosapic_intr_info));
 		iosapic_intr_info[vector].low32 = IOSAPIC_MASK;
 		INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes);
 	}
@@ -524,7 +561,8 @@ static struct iosapic_rte_info *iosapic_alloc_rte (void)
 	int preallocated = 0;

 	if (!iosapic_kmalloc_ok && list_empty(&free_rte_list)) {
-		rte = alloc_bootmem(sizeof(struct iosapic_rte_info) * NR_PREALLOCATE_RTE_ENTRIES);
+		rte = alloc_bootmem(sizeof(struct iosapic_rte_info) *
+				    NR_PREALLOCATE_RTE_ENTRIES);
 		if (!rte)
 			return NULL;
 		for (i = 0; i < NR_PREALLOCATE_RTE_ENTRIES; i++, rte++)
@@ -532,7 +570,8 @@ static struct iosapic_rte_info *iosapic_alloc_rte (void)
 	}

 	if (!list_empty(&free_rte_list)) {
-		rte = list_entry(free_rte_list.next, struct iosapic_rte_info, rte_list);
+		rte = list_entry(free_rte_list.next, struct iosapic_rte_info,
+				 rte_list);
 		list_del(&rte->rte_list);
 		preallocated++;
 	} else {
@@ -575,7 +614,8 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
 	index = find_iosapic(gsi);
 	if (index < 0) {
-		printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n", __FUNCTION__, gsi);
+		printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
+		       __FUNCTION__, gsi);
 		return -ENODEV;
 	}

@@ -586,7 +626,8 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
 	if (!rte) {
 		rte = iosapic_alloc_rte();
 		if (!rte) {
-			printk(KERN_WARNING "%s: cannot allocate memory\n", __FUNCTION__);
+			printk(KERN_WARNING "%s: cannot allocate memory\n",
+			       __FUNCTION__);
 			return -ENOMEM;
 		}

@@ -602,7 +643,9 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
 	else if (vector_is_shared(vector)) {
 		struct iosapic_intr_info *info = &iosapic_intr_info[vector];
 		if (info->trigger != trigger || info->polarity != polarity) {
-			printk (KERN_WARNING "%s: cannot override the interrupt\n", __FUNCTION__);
+			printk (KERN_WARNING
+				"%s: cannot override the interrupt\n",
+				__FUNCTION__);
 			return -EINVAL;
 		}
 	}
@@ -619,8 +662,10 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
 	idesc = irq_descp(vector);
 	if (idesc->handler != irq_type) {
 		if (idesc->handler != &no_irq_type)
-			printk(KERN_WARNING "%s: changing vector %d from %s to %s\n",
-			       __FUNCTION__, vector, idesc->handler->typename, irq_type->typename);
+			printk(KERN_WARNING
+			       "%s: changing vector %d from %s to %s\n",
+			       __FUNCTION__, vector,
+			       idesc->handler->typename, irq_type->typename);
 		idesc->handler = irq_type;
 	}
 	return 0;
@@ -681,7 +726,7 @@ get_target_cpu (unsigned int gsi, int vector)
 		if (!num_cpus)
 			goto skip_numa_setup;

-		/* Use vector assigment to distribute across cpus in node */
+		/* Use vector assignment to distribute across cpus in node */
 		cpu_index = vector % num_cpus;

 		for (numa_cpu = first_cpu(cpu_mask) ; i < cpu_index ; i++)
@@ -703,7 +748,7 @@ skip_numa_setup:
 	} while (!cpu_online(cpu));

 	return cpu_physical_id(cpu);
-#else
+#else  /* CONFIG_SMP */
 	return cpu_physical_id(smp_processor_id());
 #endif
 }
@@ -755,7 +800,8 @@ again:
 			if (list_empty(&iosapic_intr_info[vector].rtes))
 				free_irq_vector(vector);
 			spin_unlock(&iosapic_lock);
-			spin_unlock_irqrestore(&irq_descp(vector)->lock, flags);
+			spin_unlock_irqrestore(&irq_descp(vector)->lock,
+					       flags);
 			goto again;
 		}

@@ -764,7 +810,8 @@ again:
 					  polarity, trigger);
 		if (err < 0) {
 			spin_unlock(&iosapic_lock);
-			spin_unlock_irqrestore(&irq_descp(vector)->lock, flags);
+			spin_unlock_irqrestore(&irq_descp(vector)->lock,
+					       flags);
 			return err;
 		}

@@ -806,7 +853,8 @@ iosapic_unregister_intr (unsigned int gsi)
 	 */
 	irq = gsi_to_irq(gsi);
 	if (irq < 0) {
-		printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n", gsi);
+		printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n",
+		       gsi);
 		WARN_ON(1);
 		return;
 	}
@@ -817,7 +865,9 @@ iosapic_unregister_intr (unsigned int gsi)
 	spin_lock(&iosapic_lock);
 	{
 		if ((rte = gsi_vector_to_rte(gsi, vector)) == NULL) {
-			printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n", gsi);
+			printk(KERN_ERR
+			       "iosapic_unregister_intr(%u) unbalanced\n",
+			       gsi);
 			WARN_ON(1);
 			goto out;
 		}
@@ -827,7 +877,8 @@ iosapic_unregister_intr (unsigned int gsi)
 		/* Mask the interrupt */
 		low32 = iosapic_intr_info[vector].low32 | IOSAPIC_MASK;
-		iosapic_write(rte->addr, IOSAPIC_RTE_LOW(rte->rte_index), low32);
+		iosapic_write(rte->addr, IOSAPIC_RTE_LOW(rte->rte_index),
+			      low32);

 		/* Remove the rte entry from the list */
 		list_del(&rte->rte_list);
@@ -840,7 +891,9 @@ iosapic_unregister_intr (unsigned int gsi)
 			trigger	 = iosapic_intr_info[vector].trigger;
 			polarity = iosapic_intr_info[vector].polarity;
 			dest     = iosapic_intr_info[vector].dest;
-			printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d unregistered\n",
+			printk(KERN_INFO
+			       "GSI %u (%s, %s) -> CPU %d (0x%04x)"
+			       " vector %d unregistered\n",
 			       gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
 			       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
 			       cpu_logical_id(dest), dest, vector);
"high" : "low"), cpu_logical_id(dest), dest, vector); @@ -853,12 +906,15 @@ iosapic_unregister_intr (unsigned int gsi) idesc->handler = &no_irq_type; /* Clear the interrupt information */ - memset(&iosapic_intr_info[vector], 0, sizeof(struct iosapic_intr_info)); + memset(&iosapic_intr_info[vector], 0, + sizeof(struct iosapic_intr_info)); iosapic_intr_info[vector].low32 |= IOSAPIC_MASK; INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes); if (idesc->action) { - printk(KERN_ERR "interrupt handlers still exist on IRQ %u\n", irq); + printk(KERN_ERR + "interrupt handlers still exist on" + "IRQ %u\n", irq); WARN_ON(1); } @@ -873,7 +929,6 @@ iosapic_unregister_intr (unsigned int gsi) /* * ACPI calls this when it finds an entry for a platform interrupt. - * Note that the irq_base and IOSAPIC address must be set in iosapic_init(). */ int __init iosapic_register_platform_intr (u32 int_type, unsigned int gsi, @@ -907,13 +962,16 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi, mask = 1; break; default: - printk(KERN_ERR "iosapic_register_platform_irq(): invalid int type 0x%x\n", int_type); + printk(KERN_ERR "%s: invalid int type 0x%x\n", __FUNCTION__, + int_type); return -1; } register_intr(gsi, vector, delivery, polarity, trigger); - printk(KERN_INFO "PLATFORM int %s (0x%x): GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n", + printk(KERN_INFO + "PLATFORM int %s (0x%x): GSI %u (%s, %s) -> CPU %d (0x%04x)" + " vector %d\n", int_type < ARRAY_SIZE(name) ? name[int_type] : "unknown", int_type, gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"), (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), @@ -923,10 +981,8 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi, return vector; } - /* * ACPI calls this when it finds an entry for a legacy ISA IRQ override. - * Note that the gsi_base and IOSAPIC address must be set in iosapic_init(). */ void __init iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi, @@ -955,16 +1011,19 @@ iosapic_system_init (int system_pcat_compat) for (vector = 0; vector < IA64_NUM_VECTORS; ++vector) { iosapic_intr_info[vector].low32 = IOSAPIC_MASK; - INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes); /* mark as unused */ + /* mark as unused */ + INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes); } pcat_compat = system_pcat_compat; if (pcat_compat) { /* - * Disable the compatibility mode interrupts (8259 style), needs IN/OUT support - * enabled. + * Disable the compatibility mode interrupts (8259 style), + * needs IN/OUT support enabled. */ - printk(KERN_INFO "%s: Disabling PC-AT compatible 8259 interrupts\n", __FUNCTION__); + printk(KERN_INFO + "%s: Disabling PC-AT compatible 8259 interrupts\n", + __FUNCTION__); outb(0xff, 0xA1); outb(0xff, 0x21); } @@ -1004,10 +1063,7 @@ iosapic_check_gsi_range (unsigned int gsi_base, unsigned int ver) base = iosapic_lists[index].gsi_base; end = base + iosapic_lists[index].num_rte - 1; - if (gsi_base < base && gsi_end < base) - continue;/* OK */ - - if (gsi_base > end && gsi_end > end) + if (gsi_end < base || end < gsi_base) continue; /* OK */ return -EBUSY; @@ -1053,12 +1109,14 @@ iosapic_init (unsigned long phys_addr, unsigned int gsi_base) if ((gsi_base == 0) && pcat_compat) { /* - * Map the legacy ISA devices into the IOSAPIC data. Some of these may - * get reprogrammed later on with data from the ACPI Interrupt Source - * Override table. + * Map the legacy ISA devices into the IOSAPIC data. Some of + * these may get reprogrammed later on with data from the ACPI + * Interrupt Source Override table. 
 		 */
 		for (isa_irq = 0; isa_irq < 16; ++isa_irq)
-			iosapic_override_isa_irq(isa_irq, isa_irq, IOSAPIC_POL_HIGH, IOSAPIC_EDGE);
+			iosapic_override_isa_irq(isa_irq, isa_irq,
+						 IOSAPIC_POL_HIGH,
+						 IOSAPIC_EDGE);
 	}
 	return 0;
 }
@@ -1081,7 +1139,8 @@ iosapic_remove (unsigned int gsi_base)
 		if (iosapic_lists[index].rtes_inuse) {
 			err = -EBUSY;
-			printk(KERN_WARNING "%s: IOSAPIC for GSI base %u is busy\n",
+			printk(KERN_WARNING
+			       "%s: IOSAPIC for GSI base %u is busy\n",
 			       __FUNCTION__, gsi_base);
 			goto out;
 		}
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 89faa603c6b..6386f63c413 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -240,7 +240,7 @@ cache_info(char *page)
 		}
 		p += sprintf(p,
 			     "%s Cache level %lu:\n"
-			     "\tSize           : %lu bytes\n"
+			     "\tSize           : %u bytes\n"
 			     "\tAttributes     : ",
 			     cache_types[j+cci.pcci_unified], i+1,
 			     cci.pcci_cache_size);
@@ -648,9 +648,9 @@ frequency_info(char *page)
 	if (ia64_pal_freq_ratios(&proc, &bus, &itc) != 0) return 0;

 	p += sprintf(p,
-		     "Processor/Clock ratio   : %ld/%ld\n"
-		     "Bus/Clock ratio         : %ld/%ld\n"
-		     "ITC/Clock ratio         : %ld/%ld\n",
+		     "Processor/Clock ratio   : %d/%d\n"
+		     "Bus/Clock ratio         : %d/%d\n"
+		     "ITC/Clock ratio         : %d/%d\n",
 		     proc.num, proc.den, bus.num, bus.den, itc.num, itc.den);

 	return p - page;
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index ac167436e93..49958904045 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -188,7 +188,7 @@ ia64_init_itm (void)
 	itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;

 	local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
-	printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%lu/%lu, "
+	printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%u/%u, "
 	       "ITC freq=%lu.%03luMHz", smp_processor_id(),
 	       platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
 	       itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 3b6fd798c4d..b47476d655f 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -9,6 +9,8 @@
  * 2002/08/07 Erich Focht <efocht@ess.nec.de>
  * Populate cpu entries in sysfs for non-numa systems as well
  * 	Intel Corporation - Ashok Raj
+ * 02/27/2006 Zhang, Yanmin
+ *	Populate cpu cache entries in sysfs for cpu cache info
  */

 #include <linux/config.h>
@@ -19,6 +21,7 @@
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/nodemask.h>
+#include <linux/notifier.h>
 #include <asm/mmzone.h>
 #include <asm/numa.h>
 #include <asm/cpu.h>
@@ -101,3 +104,367 @@ out:
 }

 subsys_initcall(topology_init);
+
+
+/*
+ * Export cpu cache information through sysfs
+ */
+
+/*
+ * A bunch of string array to get pretty printing
+ */
+static const char *cache_types[] = {
+	"",			/* not used */
+	"Instruction",
+	"Data",
+	"Unified"	/* unified */
+};
+
+static const char *cache_mattrib[]={
+	"WriteThrough",
+	"WriteBack",
+	"",		/* reserved */
+	""		/* reserved */
+};
+
+struct cache_info {
+	pal_cache_config_info_t	cci;
+	cpumask_t shared_cpu_map;
+	int level;
+	int type;
+	struct kobject kobj;
+};
+
+struct cpu_cache_info {
+	struct cache_info *cache_leaves;
+	int num_cache_leaves;
+	struct kobject kobj;
+};
+
+static struct cpu_cache_info	all_cpu_cache_info[NR_CPUS];
+#define LEAF_KOBJECT_PTR(x,y)	(&all_cpu_cache_info[x].cache_leaves[y])
+
+#ifdef CONFIG_SMP
+static void cache_shared_cpu_map_setup( unsigned int cpu,
+		struct cache_info * this_leaf)
+{
+	pal_cache_shared_info_t	csi;
+	int num_shared, i = 0;
+	unsigned int j;
+
+	if (cpu_data(cpu)->threads_per_core <= 1 &&
+		cpu_data(cpu)->cores_per_socket <= 1) {
+		cpu_set(cpu, this_leaf->shared_cpu_map);
+		return;
+	}
+
+	if (ia64_pal_cache_shared_info(this_leaf->level,
+					this_leaf->type,
+					0,
+					&csi) != PAL_STATUS_SUCCESS)
+		return;
+
+	num_shared = (int) csi.num_shared;
+	do {
+		for_each_cpu(j)
+			if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
+				&& cpu_data(j)->core_id == csi.log1_cid
+				&& cpu_data(j)->thread_id == csi.log1_tid)
+				cpu_set(j, this_leaf->shared_cpu_map);
+
+		i++;
+	} while (i < num_shared &&
+		ia64_pal_cache_shared_info(this_leaf->level,
+					this_leaf->type,
+					i,
+					&csi) == PAL_STATUS_SUCCESS);
+}
+#else
+static void cache_shared_cpu_map_setup(unsigned int cpu,
+		struct cache_info * this_leaf)
+{
+	cpu_set(cpu, this_leaf->shared_cpu_map);
+	return;
+}
+#endif
+
+static ssize_t show_coherency_line_size(struct cache_info *this_leaf,
+					char *buf)
+{
+	return sprintf(buf, "%u\n", 1 << this_leaf->cci.pcci_line_size);
+}
+
+static ssize_t show_ways_of_associativity(struct cache_info *this_leaf,
+					char *buf)
+{
+	return sprintf(buf, "%u\n", this_leaf->cci.pcci_assoc);
+}
+
+static ssize_t show_attributes(struct cache_info *this_leaf, char *buf)
+{
+	return sprintf(buf,
+			"%s\n",
+			cache_mattrib[this_leaf->cci.pcci_cache_attr]);
+}
+
+static ssize_t show_size(struct cache_info *this_leaf, char *buf)
+{
+	return sprintf(buf, "%uK\n", this_leaf->cci.pcci_cache_size / 1024);
+}
+
+static ssize_t show_number_of_sets(struct cache_info *this_leaf, char *buf)
+{
+	unsigned number_of_sets = this_leaf->cci.pcci_cache_size;
+	number_of_sets /= this_leaf->cci.pcci_assoc;
+	number_of_sets /= 1 << this_leaf->cci.pcci_line_size;
+
+	return sprintf(buf, "%u\n", number_of_sets);
+}
+
+static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
+{
+	ssize_t	len;
+	cpumask_t shared_cpu_map;
+
+	cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map);
+	len = cpumask_scnprintf(buf, NR_CPUS+1, shared_cpu_map);
+	len += sprintf(buf+len, "\n");
+	return len;
+}
+
+static ssize_t show_type(struct cache_info *this_leaf, char *buf)
+{
+	int type = this_leaf->type + this_leaf->cci.pcci_unified;
+	return sprintf(buf, "%s\n", cache_types[type]);
+}
+
+static ssize_t show_level(struct cache_info *this_leaf, char *buf)
+{
+	return sprintf(buf, "%u\n", this_leaf->level);
+}
+
+struct cache_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct cache_info *, char *);
+	ssize_t (*store)(struct cache_info *, const char *, size_t count);
+};
+
+#ifdef define_one_ro
+	#undef define_one_ro
+#endif
+#define define_one_ro(_name) \
+	static struct cache_attr _name = \
+__ATTR(_name, 0444, show_##_name, NULL)
+
+define_one_ro(level);
+define_one_ro(type);
+define_one_ro(coherency_line_size);
+define_one_ro(ways_of_associativity);
+define_one_ro(size);
+define_one_ro(number_of_sets);
+define_one_ro(shared_cpu_map);
+define_one_ro(attributes);
+
+static struct attribute * cache_default_attrs[] = {
+	&type.attr,
+	&level.attr,
+	&coherency_line_size.attr,
+	&ways_of_associativity.attr,
+	&attributes.attr,
+	&size.attr,
+	&number_of_sets.attr,
+	&shared_cpu_map.attr,
+	NULL
+};
+
+#define to_object(k) container_of(k, struct cache_info, kobj)
+#define to_attr(a) container_of(a, struct cache_attr, attr)
+
+static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char * buf)
+{
+	struct cache_attr *fattr = to_attr(attr);
+	struct cache_info *this_leaf = to_object(kobj);
+	ssize_t ret;
+
+	ret = fattr->show ? fattr->show(this_leaf, buf) : 0;
+	return ret;
+}
+
+static struct sysfs_ops cache_sysfs_ops = {
+	.show   = cache_show
+};
+
+static struct kobj_type cache_ktype = {
+	.sysfs_ops	= &cache_sysfs_ops,
+	.default_attrs	= cache_default_attrs,
+};
+
+static struct kobj_type cache_ktype_percpu_entry = {
+	.sysfs_ops	= &cache_sysfs_ops,
+};
+
+static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
+{
+	if (all_cpu_cache_info[cpu].cache_leaves) {
+		kfree(all_cpu_cache_info[cpu].cache_leaves);
+		all_cpu_cache_info[cpu].cache_leaves = NULL;
+	}
+	all_cpu_cache_info[cpu].num_cache_leaves = 0;
+	memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
+
+	return;
+}
+
+static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
+{
+	u64 i, levels, unique_caches;
+	pal_cache_config_info_t cci;
+	int j;
+	s64 status;
+	struct cache_info *this_cache;
+	int num_cache_leaves = 0;
+
+	if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
+		printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
+		return -1;
+	}
+
+	this_cache=kzalloc(sizeof(struct cache_info)*unique_caches,
+			GFP_KERNEL);
+	if (this_cache == NULL)
+		return -ENOMEM;
+
+	for (i=0; i < levels; i++) {
+		for (j=2; j >0 ; j--) {
+			if ((status=ia64_pal_cache_config_info(i,j, &cci)) !=
+					PAL_STATUS_SUCCESS)
+				continue;
+
+			this_cache[num_cache_leaves].cci = cci;
+			this_cache[num_cache_leaves].level = i + 1;
+			this_cache[num_cache_leaves].type = j;
+
+			cache_shared_cpu_map_setup(cpu,
+					&this_cache[num_cache_leaves]);
+			num_cache_leaves ++;
+		}
+	}
+
+	all_cpu_cache_info[cpu].cache_leaves = this_cache;
+	all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves;
+
+	memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
+
+	return 0;
+}
+
+/* Add cache interface for CPU device */
+static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
+{
+	unsigned int cpu = sys_dev->id;
+	unsigned long i, j;
+	struct cache_info *this_object;
+	int retval = 0;
+	cpumask_t oldmask;
+
+	if (all_cpu_cache_info[cpu].kobj.parent)
+		return 0;
+
+	oldmask = current->cpus_allowed;
+	retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	if (unlikely(retval))
+		return retval;
+
+	retval = cpu_cache_sysfs_init(cpu);
+	set_cpus_allowed(current, oldmask);
+	if (unlikely(retval < 0))
+		return retval;
+
+	all_cpu_cache_info[cpu].kobj.parent = &sys_dev->kobj;
+	kobject_set_name(&all_cpu_cache_info[cpu].kobj, "%s", "cache");
+	all_cpu_cache_info[cpu].kobj.ktype = &cache_ktype_percpu_entry;
+	retval = kobject_register(&all_cpu_cache_info[cpu].kobj);
+
+	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
+		this_object = LEAF_KOBJECT_PTR(cpu,i);
+		this_object->kobj.parent = &all_cpu_cache_info[cpu].kobj;
+		kobject_set_name(&(this_object->kobj), "index%1lu", i);
+		this_object->kobj.ktype = &cache_ktype;
+		retval = kobject_register(&(this_object->kobj));
+		if (unlikely(retval)) {
+			for (j = 0; j < i; j++) {
+				kobject_unregister(
+					&(LEAF_KOBJECT_PTR(cpu,j)->kobj));
+			}
+			kobject_unregister(&all_cpu_cache_info[cpu].kobj);
+			cpu_cache_sysfs_exit(cpu);
+			break;
+		}
+	}
+	return retval;
+}
+
+/* Remove cache interface for CPU device */
+static int __cpuinit cache_remove_dev(struct sys_device * sys_dev)
+{
+	unsigned int cpu = sys_dev->id;
+	unsigned long i;
+
+	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
+		kobject_unregister(&(LEAF_KOBJECT_PTR(cpu,i)->kobj));
+
+	if (all_cpu_cache_info[cpu].kobj.parent) {
+		kobject_unregister(&all_cpu_cache_info[cpu].kobj);
+		memset(&all_cpu_cache_info[cpu].kobj,
+		       0,
+		       sizeof(struct kobject));
+	}
+
+	cpu_cache_sysfs_exit(cpu);
+
+	return 0;
+}
+
+/*
+ * When a cpu is hot-plugged, do a check and initiate
+ * cache kobject if necessary
+ */
+static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
+		unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+	struct sys_device *sys_dev;
+
+	sys_dev = get_cpu_sysdev(cpu);
+	switch (action) {
+	case CPU_ONLINE:
+		cache_add_dev(sys_dev);
+		break;
+	case CPU_DEAD:
+		cache_remove_dev(sys_dev);
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cache_cpu_notifier =
+{
+	.notifier_call = cache_cpu_callback
+};
+
+static int __cpuinit cache_sysfs_init(void)
+{
+	int i;
+
+	for_each_online_cpu(i) {
+		cache_cpu_callback(&cache_cpu_notifier, CPU_ONLINE,
+				(void *)(long)i);
+	}
+
+	register_cpu_notifier(&cache_cpu_notifier);
+
+	return 0;
+}
+
+device_initcall(cache_sysfs_init);
+
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 0b9e56dd7f0..783600fe52b 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -70,6 +70,15 @@ SECTIONS
 	  __stop___ex_table = .;
 	}

+	/* MCA table */
+	. = ALIGN(16);
+	__mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET)
+	  {
+	    __start___mca_table = .;
+	    *(__mca_table)
+	    __stop___mca_table = .;
+	  }
+
 	/* Global data */
 	_data = .;

@@ -130,15 +139,6 @@ SECTIONS
 	  __initcall_end = .;
 	}

-	/* MCA table */
-	. = ALIGN(16);
-	__mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET)
-	  {
-	    __start___mca_table = .;
-	    *(__mca_table)
-	    __stop___mca_table = .;
-	  }
-
 	.data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET)
 	  {
 	    __start___vtop_patchlist = .;
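
The interrupt-routing comment in the iosapic.c hunk above reduces to two small calculations: a line's GSI is the owning controller's gsi_base plus its RTE pin number, and find_iosapic() inverts that mapping with a single unsigned range check per controller. Below is a minimal, self-contained C sketch of those two mappings; the two-controller table is hypothetical example data, not kernel code:

/* Sketch of the GSI arithmetic described in the iosapic.c comment.
 * The controller layout here is invented for illustration only. */
#include <stdio.h>

struct iosapic_example {
	unsigned int gsi_base;	/* first GSI handled by this IOSAPIC */
	unsigned short num_rte;	/* number of RTE pins it provides */
};

static const struct iosapic_example iosapics[] = {
	{  0, 24 },		/* covers GSIs  0..23 */
	{ 24, 24 },		/* covers GSIs 24..47 */
};

/* step (2) of the comment: GSI = controller's base GSI + pin number */
static unsigned int pin_to_gsi(int index, unsigned int pin)
{
	return iosapics[index].gsi_base + pin;
}

/* mirrors find_iosapic(): (gsi - gsi_base) wraps to a huge unsigned
 * value when gsi < gsi_base, so one compare rejects both too-low and
 * too-high GSIs */
static int find_iosapic_example(unsigned int gsi)
{
	int i;

	for (i = 0; i < (int)(sizeof(iosapics) / sizeof(iosapics[0])); i++)
		if (gsi - iosapics[i].gsi_base < iosapics[i].num_rte)
			return i;
	return -1;
}

int main(void)
{
	unsigned int gsi = pin_to_gsi(1, 3);	/* pin 3 on the second IOSAPIC */

	printf("pin 3 -> GSI %u, owned by IOSAPIC %d\n",
	       gsi, find_iosapic_example(gsi));
	return 0;
}

On small systems the final step of the chain, vector <-> IRQ, is the identity mapping noted in gsi_to_irq()'s XXX comment, so no additional lookup table is involved there.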