Diffstat (limited to 'drivers/irqchip')
-rw-r--r--   drivers/irqchip/irq-armada-370-xp.c | 202
-rw-r--r--   drivers/irqchip/irq-bcm2835.c       |  22
-rw-r--r--   drivers/irqchip/irq-gic.c           | 151
-rw-r--r--   drivers/irqchip/irq-vic.c           |   7
4 files changed, 361 insertions, 21 deletions
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index bb328a36612..433cc8568de 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -21,7 +21,10 @@
 #include <linux/io.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
+#include <linux/of_pci.h>
 #include <linux/irqdomain.h>
+#include <linux/slab.h>
+#include <linux/msi.h>
 #include <asm/mach/arch.h>
 #include <asm/exception.h>
 #include <asm/smp_plat.h>
@@ -51,12 +54,22 @@
 #define IPI_DOORBELL_START		(0)
 #define IPI_DOORBELL_END		(8)
 #define IPI_DOORBELL_MASK		0xFF
+#define PCI_MSI_DOORBELL_START		(16)
+#define PCI_MSI_DOORBELL_NR		(16)
+#define PCI_MSI_DOORBELL_END		(32)
+#define PCI_MSI_DOORBELL_MASK		0xFFFF0000
 
 static DEFINE_RAW_SPINLOCK(irq_controller_lock);
 
 static void __iomem *per_cpu_int_base;
 static void __iomem *main_int_base;
 static struct irq_domain *armada_370_xp_mpic_domain;
 
+#ifdef CONFIG_PCI_MSI
+static struct irq_domain *armada_370_xp_msi_domain;
+static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
+static DEFINE_MUTEX(msi_used_lock);
+static phys_addr_t msi_doorbell_addr;
+#endif
 
 /*
  * In SMP mode:
@@ -87,6 +100,144 @@ static void armada_370_xp_irq_unmask(struct irq_data *d)
 		ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
 }
 
+#ifdef CONFIG_PCI_MSI
+
+static int armada_370_xp_alloc_msi(void)
+{
+	int hwirq;
+
+	mutex_lock(&msi_used_lock);
+	hwirq = find_first_zero_bit(&msi_used, PCI_MSI_DOORBELL_NR);
+	if (hwirq >= PCI_MSI_DOORBELL_NR)
+		hwirq = -ENOSPC;
+	else
+		set_bit(hwirq, msi_used);
+	mutex_unlock(&msi_used_lock);
+
+	return hwirq;
+}
+
+static void armada_370_xp_free_msi(int hwirq)
+{
+	mutex_lock(&msi_used_lock);
+	if (!test_bit(hwirq, msi_used))
+		pr_err("trying to free unused MSI#%d\n", hwirq);
+	else
+		clear_bit(hwirq, msi_used);
+	mutex_unlock(&msi_used_lock);
+}
+
+static int armada_370_xp_setup_msi_irq(struct msi_chip *chip,
+				       struct pci_dev *pdev,
+				       struct msi_desc *desc)
+{
+	struct msi_msg msg;
+	irq_hw_number_t hwirq;
+	int virq;
+
+	hwirq = armada_370_xp_alloc_msi();
+	if (hwirq < 0)
+		return hwirq;
+
+	virq = irq_create_mapping(armada_370_xp_msi_domain, hwirq);
+	if (!virq) {
+		armada_370_xp_free_msi(hwirq);
+		return -EINVAL;
+	}
+
+	irq_set_msi_desc(virq, desc);
+
+	msg.address_lo = msi_doorbell_addr;
+	msg.address_hi = 0;
+	msg.data = 0xf00 | (hwirq + 16);
+
+	write_msi_msg(virq, &msg);
+	return 0;
+}
+
+static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip,
+					   unsigned int irq)
+{
+	struct irq_data *d = irq_get_irq_data(irq);
+	irq_dispose_mapping(irq);
+	armada_370_xp_free_msi(d->hwirq);
+}
+
+static struct irq_chip armada_370_xp_msi_irq_chip = {
+	.name = "armada_370_xp_msi_irq",
+	.irq_enable = unmask_msi_irq,
+	.irq_disable = mask_msi_irq,
+	.irq_mask = mask_msi_irq,
+	.irq_unmask = unmask_msi_irq,
+};
+
+static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq,
+				 irq_hw_number_t hw)
+{
+	irq_set_chip_and_handler(virq, &armada_370_xp_msi_irq_chip,
+				 handle_simple_irq);
+	set_irq_flags(virq, IRQF_VALID);
+
+	return 0;
+}
+
+static const struct irq_domain_ops armada_370_xp_msi_irq_ops = {
+	.map = armada_370_xp_msi_map,
+};
+
+static int armada_370_xp_msi_init(struct device_node *node,
+				  phys_addr_t main_int_phys_base)
+{
+	struct msi_chip *msi_chip;
+	u32 reg;
+	int ret;
+
+	msi_doorbell_addr = main_int_phys_base +
+		ARMADA_370_XP_SW_TRIG_INT_OFFS;
+
+	msi_chip = kzalloc(sizeof(*msi_chip), GFP_KERNEL);
+	if (!msi_chip)
+		return -ENOMEM;
+
+	msi_chip->setup_irq = armada_370_xp_setup_msi_irq;
+	msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq;
+	msi_chip->of_node = node;
+
+	armada_370_xp_msi_domain =
+		irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR,
+				      &armada_370_xp_msi_irq_ops,
+				      NULL);
+	if (!armada_370_xp_msi_domain) {
+		kfree(msi_chip);
+		return -ENOMEM;
+	}
+
+	ret = of_pci_msi_chip_add(msi_chip);
+	if (ret < 0) {
+		irq_domain_remove(armada_370_xp_msi_domain);
+		kfree(msi_chip);
+		return ret;
+	}
+
+	reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS)
+		| PCI_MSI_DOORBELL_MASK;
+
+	writel(reg, per_cpu_int_base +
+	       ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+
+	/* Unmask IPI interrupt */
+	writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+
+	return 0;
+}
+#else
+static inline int armada_370_xp_msi_init(struct device_node *node,
+					 phys_addr_t main_int_phys_base)
+{
+	return 0;
+}
+#endif
+
 #ifdef CONFIG_SMP
 static int armada_xp_set_affinity(struct irq_data *d,
 				  const struct cpumask *mask_val, bool force)
@@ -214,12 +365,39 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
 		if (irqnr > 1022)
 			break;
 
-		if (irqnr > 0) {
+		if (irqnr > 1) {
 			irqnr = irq_find_mapping(armada_370_xp_mpic_domain,
 					irqnr);
 			handle_IRQ(irqnr, regs);
 			continue;
 		}
+
+#ifdef CONFIG_PCI_MSI
+		/* MSI handling */
+		if (irqnr == 1) {
+			u32 msimask, msinr;
+
+			msimask = readl_relaxed(per_cpu_int_base +
+						ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
+				& PCI_MSI_DOORBELL_MASK;
+
+			writel(~PCI_MSI_DOORBELL_MASK, per_cpu_int_base +
+			       ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+
+			for (msinr = PCI_MSI_DOORBELL_START;
+			     msinr < PCI_MSI_DOORBELL_END; msinr++) {
+				int irq;
+
+				if (!(msimask & BIT(msinr)))
+					continue;
+
+				irq = irq_find_mapping(armada_370_xp_msi_domain,
+						       msinr - 16);
+				handle_IRQ(irq, regs);
+			}
+		}
+#endif
+
 #ifdef CONFIG_SMP
 		/* IPI Handling */
 		if (irqnr == 0) {
@@ -248,12 +426,25 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
 static int __init armada_370_xp_mpic_of_init(struct device_node *node,
 					     struct device_node *parent)
 {
+	struct resource main_int_res, per_cpu_int_res;
 	u32 control;
 
-	main_int_base = of_iomap(node, 0);
-	per_cpu_int_base = of_iomap(node, 1);
+	BUG_ON(of_address_to_resource(node, 0, &main_int_res));
+	BUG_ON(of_address_to_resource(node, 1, &per_cpu_int_res));
+
+	BUG_ON(!request_mem_region(main_int_res.start,
+				   resource_size(&main_int_res),
+				   node->full_name));
+	BUG_ON(!request_mem_region(per_cpu_int_res.start,
+				   resource_size(&per_cpu_int_res),
+				   node->full_name));
 
+	main_int_base = ioremap(main_int_res.start,
+				resource_size(&main_int_res));
 	BUG_ON(!main_int_base);
+
+	per_cpu_int_base = ioremap(per_cpu_int_res.start,
+				   resource_size(&per_cpu_int_res));
 	BUG_ON(!per_cpu_int_base);
 
 	control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
@@ -262,8 +453,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
 		irq_domain_add_linear(node, (control >> 2) & 0x3ff,
 				&armada_370_xp_mpic_irq_ops, NULL);
 
-	if (!armada_370_xp_mpic_domain)
-		panic("Unable to add Armada_370_Xp MPIC irq domain (DT)\n");
+	BUG_ON(!armada_370_xp_mpic_domain);
 
 	irq_set_default_host(armada_370_xp_mpic_domain);
 
@@ -280,6 +470,8 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
 
 #endif
 
+	armada_370_xp_msi_init(node, main_int_res.start);
+
 	set_handle_irq(armada_370_xp_handle_irq);
 
 	return 0;
diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c
index 16c78f1c5ef..1693b8e7f26 100644
--- a/drivers/irqchip/irq-bcm2835.c
+++ b/drivers/irqchip/irq-bcm2835.c
@@ -49,9 +49,11 @@
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/irqdomain.h>
-#include <linux/irqchip/bcm2835.h>
 
 #include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#include "irqchip.h"
 
 /* Put the bank and irq (32 bits) into the hwirq */
 #define MAKE_HWIRQ(b, n)	((b << 5) | (n))
@@ -93,6 +95,8 @@ struct armctrl_ic {
 };
 
 static struct armctrl_ic intc __read_mostly;
+static asmlinkage void __exception_irq_entry bcm2835_handle_irq(
+	struct pt_regs *regs);
 
 static void armctrl_mask_irq(struct irq_data *d)
 {
@@ -164,17 +168,9 @@ static int __init armctrl_of_init(struct device_node *node,
 			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
 		}
 	}
-	return 0;
-}
-
-static struct of_device_id irq_of_match[] __initconst = {
-	{ .compatible = "brcm,bcm2835-armctrl-ic", .data = armctrl_of_init },
-	{ }
-};
-
-void __init bcm2835_init_irq(void)
-{
-	of_irq_init(irq_of_match);
+
+	set_handle_irq(bcm2835_handle_irq);
+	return 0;
 }
 
 /*
@@ -200,7 +196,7 @@ static void armctrl_handle_shortcut(int bank, struct pt_regs *regs,
 	handle_IRQ(irq_linear_revmap(intc.domain, irq), regs);
 }
 
-asmlinkage void __exception_irq_entry bcm2835_handle_irq(
+static asmlinkage void __exception_irq_entry bcm2835_handle_irq(
 	struct pt_regs *regs)
 {
 	u32 stat, irq;
@@ -222,3 +218,5 @@ asmlinkage void __exception_irq_entry bcm2835_handle_irq(
 		}
 	}
 }
+
+IRQCHIP_DECLARE(bcm2835_armctrl_ic, "brcm,bcm2835-armctrl-ic", armctrl_of_init);
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index d0e948084ea..9031171c141 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -253,10 +253,9 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
 		return -EINVAL;
 
+	raw_spin_lock(&irq_controller_lock);
 	mask = 0xff << shift;
 	bit = gic_cpu_map[cpu] << shift;
-
-	raw_spin_lock(&irq_controller_lock);
 	val = readl_relaxed(reg) & ~mask;
 	writel_relaxed(val | bit, reg);
 	raw_spin_unlock(&irq_controller_lock);
@@ -652,7 +651,9 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
 void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 {
 	int cpu;
-	unsigned long map = 0;
+	unsigned long flags, map = 0;
+
+	raw_spin_lock_irqsave(&irq_controller_lock, flags);
 
 	/* Convert our logical CPU mask into a physical one. */
 	for_each_cpu(cpu, mask)
@@ -666,7 +667,149 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 	/* this always happens on GIC0 */
 	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+
+	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
+#endif
+
+#ifdef CONFIG_BL_SWITCHER
+/*
+ * gic_send_sgi - send a SGI directly to given CPU interface number
+ *
+ * cpu_id: the ID for the destination CPU interface
+ * irq: the IPI number to send a SGI for
+ */
+void gic_send_sgi(unsigned int cpu_id, unsigned int irq)
+{
+	BUG_ON(cpu_id >= NR_GIC_CPU_IF);
+	cpu_id = 1 << cpu_id;
+	/* this always happens on GIC0 */
+	writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+}
+
+/*
+ * gic_get_cpu_id - get the CPU interface ID for the specified CPU
+ *
+ * @cpu: the logical CPU number to get the GIC ID for.
+ *
+ * Return the CPU interface ID for the given logical CPU number,
+ * or -1 if the CPU number is too large or the interface ID is
+ * unknown (more than one bit set).
+ */
+int gic_get_cpu_id(unsigned int cpu)
+{
+	unsigned int cpu_bit;
+
+	if (cpu >= NR_GIC_CPU_IF)
+		return -1;
+	cpu_bit = gic_cpu_map[cpu];
+	if (cpu_bit & (cpu_bit - 1))
+		return -1;
+	return __ffs(cpu_bit);
 }
+
+/*
+ * gic_migrate_target - migrate IRQs to another CPU interface
+ *
+ * @new_cpu_id: the CPU target ID to migrate IRQs to
+ *
+ * Migrate all peripheral interrupts with a target matching the current CPU
+ * to the interface corresponding to @new_cpu_id. The CPU interface mapping
+ * is also updated. Targets to other CPU interfaces are unchanged.
+ * This must be called with IRQs locally disabled.
+ */
+void gic_migrate_target(unsigned int new_cpu_id)
+{
+	unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
+	void __iomem *dist_base;
+	int i, ror_val, cpu = smp_processor_id();
+	u32 val, cur_target_mask, active_mask;
+
+	if (gic_nr >= MAX_GIC_NR)
+		BUG();
+
+	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+	if (!dist_base)
+		return;
+	gic_irqs = gic_data[gic_nr].gic_irqs;
+
+	cur_cpu_id = __ffs(gic_cpu_map[cpu]);
+	cur_target_mask = 0x01010101 << cur_cpu_id;
+	ror_val = (cur_cpu_id - new_cpu_id) & 31;
+
+	raw_spin_lock(&irq_controller_lock);
+
+	/* Update the target interface for this logical CPU */
+	gic_cpu_map[cpu] = 1 << new_cpu_id;
+
+	/*
+	 * Find all the peripheral interrupts targeting the current
+	 * CPU interface and migrate them to the new CPU interface.
+	 * We skip DIST_TARGET 0 to 7 as they are read-only.
+	 */
+	for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
+		val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
+		active_mask = val & cur_target_mask;
+		if (active_mask) {
+			val &= ~active_mask;
+			val |= ror32(active_mask, ror_val);
+			writel_relaxed(val, dist_base + GIC_DIST_TARGET + i*4);
+		}
+	}
+
+	raw_spin_unlock(&irq_controller_lock);
+
+	/*
+	 * Now let's migrate and clear any potential SGIs that might be
+	 * pending for us (cur_cpu_id). Since GIC_DIST_SGI_PENDING_SET
+	 * is a banked register, we can only forward the SGI using
+	 * GIC_DIST_SOFTINT. The original SGI source is lost but Linux
+	 * doesn't use that information anyway.
+	 *
+	 * For the same reason we do not adjust SGI source information
+	 * for previously sent SGIs by us to other CPUs either.
+	 */
+	for (i = 0; i < 16; i += 4) {
+		int j;
+		val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
+		if (!val)
+			continue;
+		writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
+		for (j = i; j < i + 4; j++) {
+			if (val & 0xff)
+				writel_relaxed((1 << (new_cpu_id + 16)) | j,
+						dist_base + GIC_DIST_SOFTINT);
+			val >>= 8;
+		}
+	}
+}
+
+/*
+ * gic_get_sgir_physaddr - get the physical address for the SGI register
+ *
+ * Return the physical address of the SGI register to be used
+ * by some early assembly code when the kernel is not yet available.
+ */
+static unsigned long gic_dist_physaddr;
+
+unsigned long gic_get_sgir_physaddr(void)
+{
+	if (!gic_dist_physaddr)
+		return 0;
+	return gic_dist_physaddr + GIC_DIST_SOFTINT;
+}
+
+void __init gic_init_physaddr(struct device_node *node)
+{
+	struct resource res;
+	if (of_address_to_resource(node, 0, &res) == 0) {
+		gic_dist_physaddr = res.start;
+		pr_info("GIC physical location is %#lx\n", gic_dist_physaddr);
+	}
+}
+
+#else
+#define gic_init_physaddr(node)  do { } while (0)
 #endif
 
 static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
@@ -850,6 +993,8 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent)
 		percpu_offset = 0;
 
 	gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
+	if (!gic_cnt)
+		gic_init_physaddr(node);
 
 	if (parent) {
 		irq = irq_of_parse_and_map(node, 0);
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index 2bbb00404cf..8e21ae0bab4 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -469,6 +469,8 @@ void __init vic_init(void __iomem *base, unsigned int irq_start,
 int __init vic_of_init(struct device_node *node, struct device_node *parent)
 {
 	void __iomem *regs;
+	u32 interrupt_mask = ~0;
+	u32 wakeup_mask = ~0;
 
 	if (WARN(parent, "non-root VICs are not supported"))
 		return -EINVAL;
@@ -477,10 +479,13 @@ int __init vic_of_init(struct device_node *node, struct device_node *parent)
 	if (WARN_ON(!regs))
 		return -EIO;
 
+	of_property_read_u32(node, "valid-mask", &interrupt_mask);
+	of_property_read_u32(node, "valid-wakeup-mask", &wakeup_mask);
+
 	/*
 	 * Passing 0 as first IRQ makes the simple domain allocate descriptors
 	 */
-	__vic_init(regs, 0, ~0, ~0, node);
+	__vic_init(regs, 0, interrupt_mask, wakeup_mask, node);
 
 	return 0;
 }
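
For context (not part of the diff above): the two properties that vic_of_init() now reads are set in the VIC's device-tree node. A minimal, illustrative fragment is sketched below; the node name, unit address, register size, and mask values are hypothetical and must match the actual SoC. Both properties stay optional and default to ~0 when absent, as in the code above.

	vic0: interrupt-controller@10140000 {
		compatible = "arm,pl192-vic";
		interrupt-controller;
		#interrupt-cells = <1>;
		reg = <0x10140000 0x1000>;
		/* hypothetical values: bit N set => IRQ N is usable / wakeup-capable */
		valid-mask = <0xffffff7f>;
		valid-wakeup-mask = <0x0000ff7f>;
	};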