Diffstat (limited to 'arch/sparc64/kernel/irq.c')
-rw-r--r--	arch/sparc64/kernel/irq.c	177
1 files changed, 88 insertions(+), 89 deletions(-)
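
The main functional change in this diff is that the flat virt_to_real_irq_table[] of real-IRQ numbers becomes a per-virt-irq record that also remembers the sun4v dev_handle/dev_ino pair passed to sun4v_build_virq(), so the virtual-interrupt enable/disable/end handlers read those values back instead of reconstructing them from the INO with the IMAP_IGN/IMAP_INO masks. The standalone sketch below illustrates only that record-and-look-up pattern; it is not the kernel code, and the table size and helper names are placeholders invented for the example.

/* Illustrative sketch of the per-virt-irq bookkeeping added by this patch.
 * Not the kernel implementation: TABLE_SIZE, record_virq() and lookup_virq()
 * are placeholder names chosen for the example.
 */
#include <stdio.h>

#define TABLE_SIZE 256

static struct {
	unsigned int irq;		/* backing real IRQ (ino_bucket) */
	unsigned int dev_handle;	/* sun4v device handle, saved at build time */
	unsigned int dev_ino;		/* sun4v device INO, saved at build time */
} virq_table[TABLE_SIZE];

/* What sun4v_build_virq() now does in addition to allocating the virt IRQ. */
static void record_virq(unsigned int virq, unsigned int devhandle, unsigned int devino)
{
	virq_table[virq].dev_handle = devhandle;
	virq_table[virq].dev_ino = devino;
}

/* What sun4v_virq_enable()/disable()/end() now do instead of masking the INO. */
static void lookup_virq(unsigned int virq, unsigned int *devhandle, unsigned int *devino)
{
	*devhandle = virq_table[virq].dev_handle;
	*devino = virq_table[virq].dev_ino;
}

int main(void)
{
	unsigned int h, i;

	record_virq(1, 0x100, 0x2);
	lookup_virq(1, &h, &i);
	printf("virt irq 1 -> dev_handle %#x, dev_ino %#x\n", h, i);
	return 0;
}
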
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 8cb3358674f..23956096b3b 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -87,7 +87,11 @@ struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BY
*/
#define irq_work(__cpu) &(trap_block[(__cpu)].irq_worklist)
-static unsigned int virt_to_real_irq_table[NR_IRQS];
+static struct {
+ unsigned int irq;
+ unsigned int dev_handle;
+ unsigned int dev_ino;
+} virt_to_real_irq_table[NR_IRQS];
static unsigned char virt_irq_alloc(unsigned int real_irq)
{
@@ -96,7 +100,7 @@ static unsigned char virt_irq_alloc(unsigned int real_irq)
BUILD_BUG_ON(NR_IRQS >= 256);
for (ent = 1; ent < NR_IRQS; ent++) {
- if (!virt_to_real_irq_table[ent])
+ if (!virt_to_real_irq_table[ent].irq)
break;
}
if (ent >= NR_IRQS) {
@@ -104,7 +108,7 @@ static unsigned char virt_irq_alloc(unsigned int real_irq)
return 0;
}
- virt_to_real_irq_table[ent] = real_irq;
+ virt_to_real_irq_table[ent].irq = real_irq;
return ent;
}
@@ -117,8 +121,8 @@ static void virt_irq_free(unsigned int virt_irq)
if (virt_irq >= NR_IRQS)
return;
- real_irq = virt_to_real_irq_table[virt_irq];
- virt_to_real_irq_table[virt_irq] = 0;
+ real_irq = virt_to_real_irq_table[virt_irq].irq;
+ virt_to_real_irq_table[virt_irq].irq = 0;
__bucket(real_irq)->virt_irq = 0;
}
@@ -126,7 +130,7 @@ static void virt_irq_free(unsigned int virt_irq)
static unsigned int virt_to_real_irq(unsigned char virt_irq)
{
- return virt_to_real_irq_table[virt_irq];
+ return virt_to_real_irq_table[virt_irq].irq;
}
/*
@@ -213,8 +217,27 @@ struct irq_handler_data {
void (*pre_handler)(unsigned int, void *, void *);
void *pre_handler_arg1;
void *pre_handler_arg2;
+
+ u32 msi;
};
+void sparc64_set_msi(unsigned int virt_irq, u32 msi)
+{
+ struct irq_handler_data *data = get_irq_chip_data(virt_irq);
+
+ if (data)
+ data->msi = msi;
+}
+
+u32 sparc64_get_msi(unsigned int virt_irq)
+{
+ struct irq_handler_data *data = get_irq_chip_data(virt_irq);
+
+ if (data)
+ return data->msi;
+ return 0xffffffff;
+}
+
static inline struct ino_bucket *virt_irq_to_bucket(unsigned int virt_irq)
{
unsigned int real_irq = virt_to_real_irq(virt_irq);
@@ -304,7 +327,7 @@ static void sun4u_irq_disable(unsigned int virt_irq)
if (likely(data)) {
unsigned long imap = data->imap;
- u32 tmp = upa_readq(imap);
+ unsigned long tmp = upa_readq(imap);
tmp &= ~IMAP_VALID;
upa_writeq(tmp, imap);
@@ -336,15 +359,15 @@ static void sun4v_irq_enable(unsigned int virt_irq)
err = sun4v_intr_settarget(ino, cpuid);
if (err != HV_EOK)
- printk("sun4v_intr_settarget(%x,%lu): err(%d)\n",
- ino, cpuid, err);
+ printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
+ "err(%d)\n", ino, cpuid, err);
err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
if (err != HV_EOK)
- printk("sun4v_intr_setstate(%x): "
+ printk(KERN_ERR "sun4v_intr_setstate(%x): "
"err(%d)\n", ino, err);
err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
if (err != HV_EOK)
- printk("sun4v_intr_setenabled(%x): err(%d)\n",
+ printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
ino, err);
}
}
@@ -362,8 +385,8 @@ static void sun4v_set_affinity(unsigned int virt_irq, cpumask_t mask)
err = sun4v_intr_settarget(ino, cpuid);
if (err != HV_EOK)
- printk("sun4v_intr_settarget(%x,%lu): err(%d)\n",
- ino, cpuid, err);
+ printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
+ "err(%d)\n", ino, cpuid, err);
}
}
@@ -377,7 +400,7 @@ static void sun4v_irq_disable(unsigned int virt_irq)
err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
if (err != HV_EOK)
- printk("sun4v_intr_setenabled(%x): "
+ printk(KERN_ERR "sun4v_intr_setenabled(%x): "
"err(%d)\n", ino, err);
}
}
@@ -410,7 +433,7 @@ static void sun4v_irq_end(unsigned int virt_irq)
err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
if (err != HV_EOK)
- printk("sun4v_intr_setstate(%x): "
+ printk(KERN_ERR "sun4v_intr_setstate(%x): "
"err(%d)\n", ino, err);
}
}
@@ -418,7 +441,6 @@ static void sun4v_irq_end(unsigned int virt_irq)
static void sun4v_virq_enable(unsigned int virt_irq)
{
struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
- unsigned int ino = bucket - &ivector_table[0];
if (likely(bucket)) {
unsigned long cpuid, dev_handle, dev_ino;
@@ -426,24 +448,24 @@ static void sun4v_virq_enable(unsigned int virt_irq)
cpuid = irq_choose_cpu(virt_irq);
- dev_handle = ino & IMAP_IGN;
- dev_ino = ino & IMAP_INO;
+ dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
+ dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
if (err != HV_EOK)
- printk("sun4v_vintr_set_target(%lx,%lx,%lu): "
+ printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
"err(%d)\n",
dev_handle, dev_ino, cpuid, err);
err = sun4v_vintr_set_state(dev_handle, dev_ino,
HV_INTR_STATE_IDLE);
if (err != HV_EOK)
- printk("sun4v_vintr_set_state(%lx,%lx,"
+ printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
"HV_INTR_STATE_IDLE): err(%d)\n",
dev_handle, dev_ino, err);
err = sun4v_vintr_set_valid(dev_handle, dev_ino,
HV_INTR_ENABLED);
if (err != HV_EOK)
- printk("sun4v_vintr_set_state(%lx,%lx,"
+ printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
"HV_INTR_ENABLED): err(%d)\n",
dev_handle, dev_ino, err);
}
@@ -452,7 +474,6 @@ static void sun4v_virq_enable(unsigned int virt_irq)
static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
- unsigned int ino = bucket - &ivector_table[0];
if (likely(bucket)) {
unsigned long cpuid, dev_handle, dev_ino;
@@ -460,12 +481,12 @@ static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask)
cpuid = irq_choose_cpu(virt_irq);
- dev_handle = ino & IMAP_IGN;
- dev_ino = ino & IMAP_INO;
+ dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
+ dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
if (err != HV_EOK)
- printk("sun4v_vintr_set_target(%lx,%lx,%lu): "
+ printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
"err(%d)\n",
dev_handle, dev_ino, cpuid, err);
}
@@ -474,19 +495,18 @@ static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask)
static void sun4v_virq_disable(unsigned int virt_irq)
{
struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
- unsigned int ino = bucket - &ivector_table[0];
if (likely(bucket)) {
unsigned long dev_handle, dev_ino;
int err;
- dev_handle = ino & IMAP_IGN;
- dev_ino = ino & IMAP_INO;
+ dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
+ dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
err = sun4v_vintr_set_valid(dev_handle, dev_ino,
HV_INTR_DISABLED);
if (err != HV_EOK)
- printk("sun4v_vintr_set_state(%lx,%lx,"
+ printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
"HV_INTR_DISABLED): err(%d)\n",
dev_handle, dev_ino, err);
}
@@ -495,7 +515,6 @@ static void sun4v_virq_disable(unsigned int virt_irq)
static void sun4v_virq_end(unsigned int virt_irq)
{
struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
- unsigned int ino = bucket - &ivector_table[0];
struct irq_desc *desc = irq_desc + virt_irq;
if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
@@ -505,13 +524,13 @@ static void sun4v_virq_end(unsigned int virt_irq)
unsigned long dev_handle, dev_ino;
int err;
- dev_handle = ino & IMAP_IGN;
- dev_ino = ino & IMAP_INO;
+ dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
+ dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
err = sun4v_vintr_set_state(dev_handle, dev_ino,
HV_INTR_STATE_IDLE);
if (err != HV_EOK)
- printk("sun4v_vintr_set_state(%lx,%lx,"
+ printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
"HV_INTR_STATE_IDLE): err(%d)\n",
dev_handle, dev_ino, err);
}
@@ -700,11 +719,12 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
unsigned long sysino, hv_err;
+ unsigned int virq;
- BUG_ON(devhandle & ~IMAP_IGN);
- BUG_ON(devino & ~IMAP_INO);
+ BUG_ON(devhandle & devino);
sysino = devhandle | devino;
+ BUG_ON(sysino & ~(IMAP_IGN | IMAP_INO));
hv_err = sun4v_vintr_set_cookie(devhandle, devino, sysino);
if (hv_err) {
@@ -713,7 +733,12 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
prom_halt();
}
- return sun4v_build_common(sysino, &sun4v_virq);
+ virq = sun4v_build_common(sysino, &sun4v_virq);
+
+ virt_to_real_irq_table[virq].dev_handle = devhandle;
+ virt_to_real_irq_table[virq].dev_ino = devino;
+
+ return virq;
}
#ifdef CONFIG_PCI_MSI
@@ -735,7 +760,7 @@ unsigned int sun4v_build_msi(u32 devhandle, unsigned int *virt_irq_p,
break;
}
if (devino >= msi_end)
- return 0;
+ return -ENOSPC;
sysino = sun4v_devino_to_sysino(devhandle, devino);
bucket = &ivector_table[sysino];
@@ -749,8 +774,8 @@ unsigned int sun4v_build_msi(u32 devhandle, unsigned int *virt_irq_p,
data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
if (unlikely(!data)) {
- prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
- prom_halt();
+ virt_irq_free(*virt_irq_p);
+ return -ENOMEM;
}
set_irq_chip_data(bucket->virt_irq, data);
@@ -923,7 +948,7 @@ static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type
}
}
-static void __cpuinit sun4v_register_mondo_queues(int this_cpu)
+void __cpuinit sun4v_register_mondo_queues(int this_cpu)
{
struct trap_per_cpu *tb = &trap_block[this_cpu];
@@ -937,20 +962,10 @@ static void __cpuinit sun4v_register_mondo_queues(int this_cpu)
tb->nonresum_qmask);
}
-static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask, int use_bootmem)
+static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask)
{
unsigned long size = PAGE_ALIGN(qmask + 1);
- unsigned long order = get_order(size);
- void *p = NULL;
-
- if (use_bootmem) {
- p = __alloc_bootmem_low(size, size, 0);
- } else {
- struct page *page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order);
- if (page)
- p = page_address(page);
- }
-
+ void *p = __alloc_bootmem_low(size, size, 0);
if (!p) {
prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
prom_halt();
@@ -959,19 +974,10 @@ static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask
*pa_ptr = __pa(p);
}
-static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask, int use_bootmem)
+static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask)
{
unsigned long size = PAGE_ALIGN(qmask + 1);
- unsigned long order = get_order(size);
- void *p = NULL;
-
- if (use_bootmem) {
- p = __alloc_bootmem_low(size, size, 0);
- } else {
- struct page *page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order);
- if (page)
- p = page_address(page);
- }
+ void *p = __alloc_bootmem_low(size, size, 0);
if (!p) {
prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
@@ -981,18 +987,14 @@ static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask,
*pa_ptr = __pa(p);
}
-static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem)
+static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
void *page;
BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
- if (use_bootmem)
- page = alloc_bootmem_low_pages(PAGE_SIZE);
- else
- page = (void *) get_zeroed_page(GFP_ATOMIC);
-
+ page = alloc_bootmem_low_pages(PAGE_SIZE);
if (!page) {
prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
prom_halt();
@@ -1003,30 +1005,27 @@ static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_
#endif
}
-/* Allocate and register the mondo and error queues for this cpu. */
-void __cpuinit sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load)
+/* Allocate mondo and error queues for all possible cpus. */
+static void __init sun4v_init_mondo_queues(void)
{
- struct trap_per_cpu *tb = &trap_block[cpu];
+ int cpu;
- if (alloc) {
- alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask, use_bootmem);
- alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask, use_bootmem);
- alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask, use_bootmem);
- alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask, use_bootmem);
- alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask, use_bootmem);
- alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, tb->nonresum_qmask, use_bootmem);
+ for_each_possible_cpu(cpu) {
+ struct trap_per_cpu *tb = &trap_block[cpu];
- init_cpu_send_mondo_info(tb, use_bootmem);
- }
+ alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
+ alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
+ alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask);
+ alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask);
+ alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
+ alloc_one_kbuf(&tb->nonresum_kernel_buf_pa,
+ tb->nonresum_qmask);
- if (load) {
- if (cpu != hard_smp_processor_id()) {
- prom_printf("SUN4V: init mondo on cpu %d not %d\n",
- cpu, hard_smp_processor_id());
- prom_halt();
- }
- sun4v_register_mondo_queues(cpu);
+ init_cpu_send_mondo_info(tb);
}
+
+ /* Load up the boot cpu's entries. */
+ sun4v_register_mondo_queues(hard_smp_processor_id());
}
static struct irqaction timer_irq_action = {
@@ -1041,7 +1040,7 @@ void __init init_IRQ(void)
memset(&ivector_table[0], 0, sizeof(ivector_table));
if (tlb_type == hypervisor)
- sun4v_init_mondo_queues(1, hard_smp_processor_id(), 1, 1);
+ sun4v_init_mondo_queues();
/* We need to clear any IRQ's pending in the soft interrupt
* registers, a spurious one could be left around from the