Diffstat (limited to 'drivers/iommu')
-rw-r--r--   drivers/iommu/Kconfig            |   12
-rw-r--r--   drivers/iommu/Makefile           |    1
-rw-r--r--   drivers/iommu/amd_iommu.c        |   10
-rw-r--r--   drivers/iommu/amd_iommu_init.c   |    2
-rw-r--r--   drivers/iommu/amd_iommu_v2.c     |  186
-rw-r--r--   drivers/iommu/arm-smmu.c         |    4
-rw-r--r--   drivers/iommu/fsl_pamu.c         |    3
-rw-r--r--   drivers/iommu/ipmmu-vmsa.c       | 1255
-rw-r--r--   drivers/iommu/msm_iommu_dev.c    |   38
-rw-r--r--   drivers/iommu/omap-iommu.c       |   31
-rw-r--r--   drivers/iommu/omap-iopgtable.h   |    3
-rw-r--r--   drivers/iommu/shmobile-ipmmu.c   |   20
12 files changed, 1375 insertions, 190 deletions
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 422807ff099..d260605e6d5 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -272,6 +272,18 @@ config SHMOBILE_IOMMU_L1SIZE default 256 if SHMOBILE_IOMMU_ADDRSIZE_64MB default 128 if SHMOBILE_IOMMU_ADDRSIZE_32MB +config IPMMU_VMSA + bool "Renesas VMSA-compatible IPMMU" + depends on ARM_LPAE + depends on ARCH_SHMOBILE || COMPILE_TEST + select IOMMU_API + select ARM_DMA_USE_IOMMU + help + Support for the Renesas VMSA-compatible IPMMU Renesas found in the + R-Mobile APE6 and R-Car H2/M2 SoCs. + + If unsure, say N. + config SPAPR_TCE_IOMMU bool "sPAPR TCE IOMMU Support" depends on PPC_POWERNV || PPC_PSERIES diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 5d58bf16e9e..8893bad048e 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o obj-$(CONFIG_ARM_SMMU) += arm-smmu.o obj-$(CONFIG_DMAR_TABLE) += dmar.o obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o +obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o obj-$(CONFIG_OMAP_IOMMU) += omap-iommu2.o diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index c949520bd19..4aec6a29e31 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -3499,8 +3499,6 @@ int __init amd_iommu_init_passthrough(void) { struct iommu_dev_data *dev_data; struct pci_dev *dev = NULL; - struct amd_iommu *iommu; - u16 devid; int ret; ret = alloc_passthrough_domain(); @@ -3514,12 +3512,6 @@ int __init amd_iommu_init_passthrough(void) dev_data = get_dev_data(&dev->dev); dev_data->passthrough = true; - devid = get_device_id(&dev->dev); - - iommu = amd_iommu_rlookup_table[devid]; - if (!iommu) - continue; - attach_device(&dev->dev, pt_domain); } @@ -3999,7 +3991,7 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic) iommu_flush_dte(iommu, devid); if (devid != alias) { irq_lookup_table[alias] = table; - set_dte_irq_entry(devid, table); + set_dte_irq_entry(alias, table); iommu_flush_dte(iommu, alias); } diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index b76c58dbe30..0e08545d729 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -788,7 +788,7 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m) * per device. But we can enable the exclusion range per * device. 
This is done here */ - set_dev_entry_bit(m->devid, DEV_ENTRY_EX); + set_dev_entry_bit(devid, DEV_ENTRY_EX); iommu->exclusion_start = m->range_start; iommu->exclusion_length = m->range_length; } diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index 5208828792e..d4daa05efe6 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -45,6 +45,8 @@ struct pri_queue { struct pasid_state { struct list_head list; /* For global state-list */ atomic_t count; /* Reference count */ + atomic_t mmu_notifier_count; /* Counting nested mmu_notifier + calls */ struct task_struct *task; /* Task bound to this PASID */ struct mm_struct *mm; /* mm_struct for the faults */ struct mmu_notifier mn; /* mmu_otifier handle */ @@ -56,6 +58,8 @@ struct pasid_state { }; struct device_state { + struct list_head list; + u16 devid; atomic_t count; struct pci_dev *pdev; struct pasid_state **states; @@ -81,13 +85,9 @@ struct fault { u16 flags; }; -static struct device_state **state_table; +static LIST_HEAD(state_list); static spinlock_t state_lock; -/* List and lock for all pasid_states */ -static LIST_HEAD(pasid_state_list); -static DEFINE_SPINLOCK(ps_lock); - static struct workqueue_struct *iommu_wq; /* @@ -99,7 +99,6 @@ static u64 *empty_page_table; static void free_pasid_states(struct device_state *dev_state); static void unbind_pasid(struct device_state *dev_state, int pasid); -static int task_exit(struct notifier_block *nb, unsigned long e, void *data); static u16 device_id(struct pci_dev *pdev) { @@ -111,13 +110,25 @@ static u16 device_id(struct pci_dev *pdev) return devid; } +static struct device_state *__get_device_state(u16 devid) +{ + struct device_state *dev_state; + + list_for_each_entry(dev_state, &state_list, list) { + if (dev_state->devid == devid) + return dev_state; + } + + return NULL; +} + static struct device_state *get_device_state(u16 devid) { struct device_state *dev_state; unsigned long flags; spin_lock_irqsave(&state_lock, flags); - dev_state = state_table[devid]; + dev_state = __get_device_state(devid); if (dev_state != NULL) atomic_inc(&dev_state->count); spin_unlock_irqrestore(&state_lock, flags); @@ -158,29 +169,6 @@ static void put_device_state_wait(struct device_state *dev_state) free_device_state(dev_state); } -static struct notifier_block profile_nb = { - .notifier_call = task_exit, -}; - -static void link_pasid_state(struct pasid_state *pasid_state) -{ - spin_lock(&ps_lock); - list_add_tail(&pasid_state->list, &pasid_state_list); - spin_unlock(&ps_lock); -} - -static void __unlink_pasid_state(struct pasid_state *pasid_state) -{ - list_del(&pasid_state->list); -} - -static void unlink_pasid_state(struct pasid_state *pasid_state) -{ - spin_lock(&ps_lock); - __unlink_pasid_state(pasid_state); - spin_unlock(&ps_lock); -} - /* Must be called under dev_state->lock */ static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state, int pasid, bool alloc) @@ -337,7 +325,6 @@ static void unbind_pasid(struct device_state *dev_state, int pasid) if (pasid_state == NULL) return; - unlink_pasid_state(pasid_state); __unbind_pasid(pasid_state); put_pasid_state_wait(pasid_state); /* Reference taken in this function */ } @@ -379,7 +366,12 @@ static void free_pasid_states(struct device_state *dev_state) continue; put_pasid_state(pasid_state); - unbind_pasid(dev_state, i); + + /* + * This will call the mn_release function and + * unbind the PASID + */ + mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm); } if (dev_state->pasid_levels == 2) 
@@ -443,8 +435,11 @@ static void mn_invalidate_range_start(struct mmu_notifier *mn, pasid_state = mn_to_state(mn); dev_state = pasid_state->device_state; - amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid, - __pa(empty_page_table)); + if (atomic_add_return(1, &pasid_state->mmu_notifier_count) == 1) { + amd_iommu_domain_set_gcr3(dev_state->domain, + pasid_state->pasid, + __pa(empty_page_table)); + } } static void mn_invalidate_range_end(struct mmu_notifier *mn, @@ -457,11 +452,31 @@ static void mn_invalidate_range_end(struct mmu_notifier *mn, pasid_state = mn_to_state(mn); dev_state = pasid_state->device_state; - amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid, - __pa(pasid_state->mm->pgd)); + if (atomic_dec_and_test(&pasid_state->mmu_notifier_count)) { + amd_iommu_domain_set_gcr3(dev_state->domain, + pasid_state->pasid, + __pa(pasid_state->mm->pgd)); + } +} + +static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm) +{ + struct pasid_state *pasid_state; + struct device_state *dev_state; + + might_sleep(); + + pasid_state = mn_to_state(mn); + dev_state = pasid_state->device_state; + + if (pasid_state->device_state->inv_ctx_cb) + dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid); + + unbind_pasid(dev_state, pasid_state->pasid); } static struct mmu_notifier_ops iommu_mn = { + .release = mn_release, .clear_flush_young = mn_clear_flush_young, .change_pte = mn_change_pte, .invalidate_page = mn_invalidate_page, @@ -504,8 +519,10 @@ static void do_fault(struct work_struct *work) write = !!(fault->flags & PPR_FAULT_WRITE); + down_read(&fault->state->mm->mmap_sem); npages = get_user_pages(fault->state->task, fault->state->mm, fault->address, 1, write, 0, &page, NULL); + up_read(&fault->state->mm->mmap_sem); if (npages == 1) { put_page(page); @@ -604,53 +621,6 @@ static struct notifier_block ppr_nb = { .notifier_call = ppr_notifier, }; -static int task_exit(struct notifier_block *nb, unsigned long e, void *data) -{ - struct pasid_state *pasid_state; - struct task_struct *task; - - task = data; - - /* - * Using this notifier is a hack - but there is no other choice - * at the moment. What I really want is a sleeping notifier that - * is called when an MM goes down. But such a notifier doesn't - * exist yet. The notifier needs to sleep because it has to make - * sure that the device does not use the PASID and the address - * space anymore before it is destroyed. This includes waiting - * for pending PRI requests to pass the workqueue. The - * MMU-Notifiers would be a good fit, but they use RCU and so - * they are not allowed to sleep. Lets see how we can solve this - * in a more intelligent way in the future. 
- */ -again: - spin_lock(&ps_lock); - list_for_each_entry(pasid_state, &pasid_state_list, list) { - struct device_state *dev_state; - int pasid; - - if (pasid_state->task != task) - continue; - - /* Drop Lock and unbind */ - spin_unlock(&ps_lock); - - dev_state = pasid_state->device_state; - pasid = pasid_state->pasid; - - if (pasid_state->device_state->inv_ctx_cb) - dev_state->inv_ctx_cb(dev_state->pdev, pasid); - - unbind_pasid(dev_state, pasid); - - /* Task may be in the list multiple times */ - goto again; - } - spin_unlock(&ps_lock); - - return NOTIFY_OK; -} - int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid, struct task_struct *task) { @@ -680,6 +650,7 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid, goto out; atomic_set(&pasid_state->count, 1); + atomic_set(&pasid_state->mmu_notifier_count, 0); init_waitqueue_head(&pasid_state->wq); spin_lock_init(&pasid_state->lock); @@ -703,8 +674,6 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid, if (ret) goto out_clear_state; - link_pasid_state(pasid_state); - return 0; out_clear_state: @@ -725,6 +694,7 @@ EXPORT_SYMBOL(amd_iommu_bind_pasid); void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid) { + struct pasid_state *pasid_state; struct device_state *dev_state; u16 devid; @@ -741,7 +711,17 @@ void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid) if (pasid < 0 || pasid >= dev_state->max_pasids) goto out; - unbind_pasid(dev_state, pasid); + pasid_state = get_pasid_state(dev_state, pasid); + if (pasid_state == NULL) + goto out; + /* + * Drop reference taken here. We are safe because we still hold + * the reference taken in the amd_iommu_bind_pasid function. + */ + put_pasid_state(pasid_state); + + /* This will call the mn_release function and unbind the PASID */ + mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm); out: put_device_state(dev_state); @@ -771,7 +751,8 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids) spin_lock_init(&dev_state->lock); init_waitqueue_head(&dev_state->wq); - dev_state->pdev = pdev; + dev_state->pdev = pdev; + dev_state->devid = devid; tmp = pasids; for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9) @@ -801,13 +782,13 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids) spin_lock_irqsave(&state_lock, flags); - if (state_table[devid] != NULL) { + if (__get_device_state(devid) != NULL) { spin_unlock_irqrestore(&state_lock, flags); ret = -EBUSY; goto out_free_domain; } - state_table[devid] = dev_state; + list_add_tail(&dev_state->list, &state_list); spin_unlock_irqrestore(&state_lock, flags); @@ -839,13 +820,13 @@ void amd_iommu_free_device(struct pci_dev *pdev) spin_lock_irqsave(&state_lock, flags); - dev_state = state_table[devid]; + dev_state = __get_device_state(devid); if (dev_state == NULL) { spin_unlock_irqrestore(&state_lock, flags); return; } - state_table[devid] = NULL; + list_del(&dev_state->list); spin_unlock_irqrestore(&state_lock, flags); @@ -872,7 +853,7 @@ int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev, spin_lock_irqsave(&state_lock, flags); ret = -EINVAL; - dev_state = state_table[devid]; + dev_state = __get_device_state(devid); if (dev_state == NULL) goto out_unlock; @@ -903,7 +884,7 @@ int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev, spin_lock_irqsave(&state_lock, flags); ret = -EINVAL; - dev_state = state_table[devid]; + dev_state = __get_device_state(devid); if (dev_state == NULL) goto out_unlock; @@ -920,7 +901,6 @@ EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb); static int __init 
amd_iommu_v2_init(void) { - size_t state_table_size; int ret; pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>\n"); @@ -936,16 +916,10 @@ static int __init amd_iommu_v2_init(void) spin_lock_init(&state_lock); - state_table_size = MAX_DEVICES * sizeof(struct device_state *); - state_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, - get_order(state_table_size)); - if (state_table == NULL) - return -ENOMEM; - ret = -ENOMEM; iommu_wq = create_workqueue("amd_iommu_v2"); if (iommu_wq == NULL) - goto out_free; + goto out; ret = -ENOMEM; empty_page_table = (u64 *)get_zeroed_page(GFP_KERNEL); @@ -953,29 +927,24 @@ static int __init amd_iommu_v2_init(void) goto out_destroy_wq; amd_iommu_register_ppr_notifier(&ppr_nb); - profile_event_register(PROFILE_TASK_EXIT, &profile_nb); return 0; out_destroy_wq: destroy_workqueue(iommu_wq); -out_free: - free_pages((unsigned long)state_table, get_order(state_table_size)); - +out: return ret; } static void __exit amd_iommu_v2_exit(void) { struct device_state *dev_state; - size_t state_table_size; int i; if (!amd_iommu_v2_supported()) return; - profile_event_unregister(PROFILE_TASK_EXIT, &profile_nb); amd_iommu_unregister_ppr_notifier(&ppr_nb); flush_workqueue(iommu_wq); @@ -998,9 +967,6 @@ static void __exit amd_iommu_v2_exit(void) destroy_workqueue(iommu_wq); - state_table_size = MAX_DEVICES * sizeof(struct device_state *); - free_pages((unsigned long)state_table, get_order(state_table_size)); - free_page((unsigned long)empty_page_table); } diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 647c3c7fd74..1599354e974 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -1167,7 +1167,7 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, for (i = 0; i < master->num_streamids; ++i) { u32 idx, s2cr; idx = master->smrs ? master->smrs[i].idx : master->streamids[i]; - s2cr = (S2CR_TYPE_TRANS << S2CR_TYPE_SHIFT) | + s2cr = S2CR_TYPE_TRANS | (smmu_domain->root_cfg.cbndx << S2CR_CBNDX_SHIFT); writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx)); } @@ -1804,7 +1804,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) * allocation (PTRS_PER_PGD). */ #ifdef CONFIG_64BIT - smmu->s1_output_size = min(39UL, size); + smmu->s1_output_size = min((unsigned long)VA_BITS, size); #else smmu->s1_output_size = min(32UL, size); #endif diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c index cba0498eb01..b99dd88e31b 100644 --- a/drivers/iommu/fsl_pamu.c +++ b/drivers/iommu/fsl_pamu.c @@ -592,8 +592,7 @@ found_cpu_node: /* advance to next node in cache hierarchy */ node = of_find_node_by_phandle(*prop); if (!node) { - pr_debug("Invalid node for cache hierarchy %s\n", - node->full_name); + pr_debug("Invalid node for cache hierarchy\n"); return ~(u32)0; } } diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c new file mode 100644 index 00000000000..53cde086e83 --- /dev/null +++ b/drivers/iommu/ipmmu-vmsa.c @@ -0,0 +1,1255 @@ +/* + * IPMMU VMSA + * + * Copyright (C) 2014 Renesas Electronics Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
+ */ + +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/export.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iommu.h> +#include <linux/module.h> +#include <linux/platform_data/ipmmu-vmsa.h> +#include <linux/platform_device.h> +#include <linux/sizes.h> +#include <linux/slab.h> + +#include <asm/dma-iommu.h> +#include <asm/pgalloc.h> + +struct ipmmu_vmsa_device { + struct device *dev; + void __iomem *base; + struct list_head list; + + const struct ipmmu_vmsa_platform_data *pdata; + unsigned int num_utlbs; + + struct dma_iommu_mapping *mapping; +}; + +struct ipmmu_vmsa_domain { + struct ipmmu_vmsa_device *mmu; + struct iommu_domain *io_domain; + + unsigned int context_id; + spinlock_t lock; /* Protects mappings */ + pgd_t *pgd; +}; + +struct ipmmu_vmsa_archdata { + struct ipmmu_vmsa_device *mmu; + unsigned int utlb; +}; + +static DEFINE_SPINLOCK(ipmmu_devices_lock); +static LIST_HEAD(ipmmu_devices); + +#define TLB_LOOP_TIMEOUT 100 /* 100us */ + +/* ----------------------------------------------------------------------------- + * Registers Definition + */ + +#define IM_CTX_SIZE 0x40 + +#define IMCTR 0x0000 +#define IMCTR_TRE (1 << 17) +#define IMCTR_AFE (1 << 16) +#define IMCTR_RTSEL_MASK (3 << 4) +#define IMCTR_RTSEL_SHIFT 4 +#define IMCTR_TREN (1 << 3) +#define IMCTR_INTEN (1 << 2) +#define IMCTR_FLUSH (1 << 1) +#define IMCTR_MMUEN (1 << 0) + +#define IMCAAR 0x0004 + +#define IMTTBCR 0x0008 +#define IMTTBCR_EAE (1 << 31) +#define IMTTBCR_PMB (1 << 30) +#define IMTTBCR_SH1_NON_SHAREABLE (0 << 28) +#define IMTTBCR_SH1_OUTER_SHAREABLE (2 << 28) +#define IMTTBCR_SH1_INNER_SHAREABLE (3 << 28) +#define IMTTBCR_SH1_MASK (3 << 28) +#define IMTTBCR_ORGN1_NC (0 << 26) +#define IMTTBCR_ORGN1_WB_WA (1 << 26) +#define IMTTBCR_ORGN1_WT (2 << 26) +#define IMTTBCR_ORGN1_WB (3 << 26) +#define IMTTBCR_ORGN1_MASK (3 << 26) +#define IMTTBCR_IRGN1_NC (0 << 24) +#define IMTTBCR_IRGN1_WB_WA (1 << 24) +#define IMTTBCR_IRGN1_WT (2 << 24) +#define IMTTBCR_IRGN1_WB (3 << 24) +#define IMTTBCR_IRGN1_MASK (3 << 24) +#define IMTTBCR_TSZ1_MASK (7 << 16) +#define IMTTBCR_TSZ1_SHIFT 16 +#define IMTTBCR_SH0_NON_SHAREABLE (0 << 12) +#define IMTTBCR_SH0_OUTER_SHAREABLE (2 << 12) +#define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12) +#define IMTTBCR_SH0_MASK (3 << 12) +#define IMTTBCR_ORGN0_NC (0 << 10) +#define IMTTBCR_ORGN0_WB_WA (1 << 10) +#define IMTTBCR_ORGN0_WT (2 << 10) +#define IMTTBCR_ORGN0_WB (3 << 10) +#define IMTTBCR_ORGN0_MASK (3 << 10) +#define IMTTBCR_IRGN0_NC (0 << 8) +#define IMTTBCR_IRGN0_WB_WA (1 << 8) +#define IMTTBCR_IRGN0_WT (2 << 8) +#define IMTTBCR_IRGN0_WB (3 << 8) +#define IMTTBCR_IRGN0_MASK (3 << 8) +#define IMTTBCR_SL0_LVL_2 (0 << 4) +#define IMTTBCR_SL0_LVL_1 (1 << 4) +#define IMTTBCR_TSZ0_MASK (7 << 0) +#define IMTTBCR_TSZ0_SHIFT O + +#define IMBUSCR 0x000c +#define IMBUSCR_DVM (1 << 2) +#define IMBUSCR_BUSSEL_SYS (0 << 0) +#define IMBUSCR_BUSSEL_CCI (1 << 0) +#define IMBUSCR_BUSSEL_IMCAAR (2 << 0) +#define IMBUSCR_BUSSEL_CCI_IMCAAR (3 << 0) +#define IMBUSCR_BUSSEL_MASK (3 << 0) + +#define IMTTLBR0 0x0010 +#define IMTTUBR0 0x0014 +#define IMTTLBR1 0x0018 +#define IMTTUBR1 0x001c + +#define IMSTR 0x0020 +#define IMSTR_ERRLVL_MASK (3 << 12) +#define IMSTR_ERRLVL_SHIFT 12 +#define IMSTR_ERRCODE_TLB_FORMAT (1 << 8) +#define IMSTR_ERRCODE_ACCESS_PERM (4 << 8) +#define IMSTR_ERRCODE_SECURE_ACCESS (5 << 8) +#define IMSTR_ERRCODE_MASK (7 << 8) +#define IMSTR_MHIT (1 << 4) +#define IMSTR_ABORT (1 << 2) +#define IMSTR_PF (1 << 1) +#define 
IMSTR_TF (1 << 0) + +#define IMMAIR0 0x0028 +#define IMMAIR1 0x002c +#define IMMAIR_ATTR_MASK 0xff +#define IMMAIR_ATTR_DEVICE 0x04 +#define IMMAIR_ATTR_NC 0x44 +#define IMMAIR_ATTR_WBRWA 0xff +#define IMMAIR_ATTR_SHIFT(n) ((n) << 3) +#define IMMAIR_ATTR_IDX_NC 0 +#define IMMAIR_ATTR_IDX_WBRWA 1 +#define IMMAIR_ATTR_IDX_DEV 2 + +#define IMEAR 0x0030 + +#define IMPCTR 0x0200 +#define IMPSTR 0x0208 +#define IMPEAR 0x020c +#define IMPMBA(n) (0x0280 + ((n) * 4)) +#define IMPMBD(n) (0x02c0 + ((n) * 4)) + +#define IMUCTR(n) (0x0300 + ((n) * 16)) +#define IMUCTR_FIXADDEN (1 << 31) +#define IMUCTR_FIXADD_MASK (0xff << 16) +#define IMUCTR_FIXADD_SHIFT 16 +#define IMUCTR_TTSEL_MMU(n) ((n) << 4) +#define IMUCTR_TTSEL_PMB (8 << 4) +#define IMUCTR_TTSEL_MASK (15 << 4) +#define IMUCTR_FLUSH (1 << 1) +#define IMUCTR_MMUEN (1 << 0) + +#define IMUASID(n) (0x0308 + ((n) * 16)) +#define IMUASID_ASID8_MASK (0xff << 8) +#define IMUASID_ASID8_SHIFT 8 +#define IMUASID_ASID0_MASK (0xff << 0) +#define IMUASID_ASID0_SHIFT 0 + +/* ----------------------------------------------------------------------------- + * Page Table Bits + */ + +/* + * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory access, + * Long-descriptor format" that the NStable bit being set in a table descriptor + * will result in the NStable and NS bits of all child entries being ignored and + * considered as being set. The IPMMU seems not to comply with this, as it + * generates a secure access page fault if any of the NStable and NS bits isn't + * set when running in non-secure mode. + */ +#ifndef PMD_NSTABLE +#define PMD_NSTABLE (_AT(pmdval_t, 1) << 63) +#endif + +#define ARM_VMSA_PTE_XN (((pteval_t)3) << 53) +#define ARM_VMSA_PTE_CONT (((pteval_t)1) << 52) +#define ARM_VMSA_PTE_AF (((pteval_t)1) << 10) +#define ARM_VMSA_PTE_SH_NS (((pteval_t)0) << 8) +#define ARM_VMSA_PTE_SH_OS (((pteval_t)2) << 8) +#define ARM_VMSA_PTE_SH_IS (((pteval_t)3) << 8) +#define ARM_VMSA_PTE_SH_MASK (((pteval_t)3) << 8) +#define ARM_VMSA_PTE_NS (((pteval_t)1) << 5) +#define ARM_VMSA_PTE_PAGE (((pteval_t)3) << 0) + +/* Stage-1 PTE */ +#define ARM_VMSA_PTE_nG (((pteval_t)1) << 11) +#define ARM_VMSA_PTE_AP_UNPRIV (((pteval_t)1) << 6) +#define ARM_VMSA_PTE_AP_RDONLY (((pteval_t)2) << 6) +#define ARM_VMSA_PTE_AP_MASK (((pteval_t)3) << 6) +#define ARM_VMSA_PTE_ATTRINDX_MASK (((pteval_t)3) << 2) +#define ARM_VMSA_PTE_ATTRINDX_SHIFT 2 + +#define ARM_VMSA_PTE_ATTRS_MASK \ + (ARM_VMSA_PTE_XN | ARM_VMSA_PTE_CONT | ARM_VMSA_PTE_nG | \ + ARM_VMSA_PTE_AF | ARM_VMSA_PTE_SH_MASK | ARM_VMSA_PTE_AP_MASK | \ + ARM_VMSA_PTE_NS | ARM_VMSA_PTE_ATTRINDX_MASK) + +#define ARM_VMSA_PTE_CONT_ENTRIES 16 +#define ARM_VMSA_PTE_CONT_SIZE (PAGE_SIZE * ARM_VMSA_PTE_CONT_ENTRIES) + +#define IPMMU_PTRS_PER_PTE 512 +#define IPMMU_PTRS_PER_PMD 512 +#define IPMMU_PTRS_PER_PGD 4 + +/* ----------------------------------------------------------------------------- + * Read/Write Access + */ + +static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset) +{ + return ioread32(mmu->base + offset); +} + +static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset, + u32 data) +{ + iowrite32(data, mmu->base + offset); +} + +static u32 ipmmu_ctx_read(struct ipmmu_vmsa_domain *domain, unsigned int reg) +{ + return ipmmu_read(domain->mmu, domain->context_id * IM_CTX_SIZE + reg); +} + +static void ipmmu_ctx_write(struct ipmmu_vmsa_domain *domain, unsigned int reg, + u32 data) +{ + ipmmu_write(domain->mmu, domain->context_id * IM_CTX_SIZE + reg, data); +} + +/* 
----------------------------------------------------------------------------- + * TLB and microTLB Management + */ + +/* Wait for any pending TLB invalidations to complete */ +static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain) +{ + unsigned int count = 0; + + while (ipmmu_ctx_read(domain, IMCTR) & IMCTR_FLUSH) { + cpu_relax(); + if (++count == TLB_LOOP_TIMEOUT) { + dev_err_ratelimited(domain->mmu->dev, + "TLB sync timed out -- MMU may be deadlocked\n"); + return; + } + udelay(1); + } +} + +static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain) +{ + u32 reg; + + reg = ipmmu_ctx_read(domain, IMCTR); + reg |= IMCTR_FLUSH; + ipmmu_ctx_write(domain, IMCTR, reg); + + ipmmu_tlb_sync(domain); +} + +/* + * Enable MMU translation for the microTLB. + */ +static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain, + unsigned int utlb) +{ + struct ipmmu_vmsa_device *mmu = domain->mmu; + + /* + * TODO: Reference-count the microTLB as several bus masters can be + * connected to the same microTLB. + */ + + /* TODO: What should we set the ASID to ? */ + ipmmu_write(mmu, IMUASID(utlb), 0); + /* TODO: Do we need to flush the microTLB ? */ + ipmmu_write(mmu, IMUCTR(utlb), + IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH | + IMUCTR_MMUEN); +} + +/* + * Disable MMU translation for the microTLB. + */ +static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain, + unsigned int utlb) +{ + struct ipmmu_vmsa_device *mmu = domain->mmu; + + ipmmu_write(mmu, IMUCTR(utlb), 0); +} + +static void ipmmu_flush_pgtable(struct ipmmu_vmsa_device *mmu, void *addr, + size_t size) +{ + unsigned long offset = (unsigned long)addr & ~PAGE_MASK; + + /* + * TODO: Add support for coherent walk through CCI with DVM and remove + * cache handling. + */ + dma_map_page(mmu->dev, virt_to_page(addr), offset, size, DMA_TO_DEVICE); +} + +/* ----------------------------------------------------------------------------- + * Domain/Context Management + */ + +static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) +{ + phys_addr_t ttbr; + u32 reg; + + /* + * TODO: When adding support for multiple contexts, find an unused + * context. + */ + domain->context_id = 0; + + /* TTBR0 */ + ipmmu_flush_pgtable(domain->mmu, domain->pgd, + IPMMU_PTRS_PER_PGD * sizeof(*domain->pgd)); + ttbr = __pa(domain->pgd); + ipmmu_ctx_write(domain, IMTTLBR0, ttbr); + ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32); + + /* + * TTBCR + * We use long descriptors with inner-shareable WBWA tables and allocate + * the whole 32-bit VA space to TTBR0. + */ + ipmmu_ctx_write(domain, IMTTBCR, IMTTBCR_EAE | + IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA | + IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1); + + /* + * MAIR0 + * We need three attributes only, non-cacheable, write-back read/write + * allocate and device memory. + */ + reg = (IMMAIR_ATTR_NC << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_NC)) + | (IMMAIR_ATTR_WBRWA << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_WBRWA)) + | (IMMAIR_ATTR_DEVICE << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_DEV)); + ipmmu_ctx_write(domain, IMMAIR0, reg); + + /* IMBUSCR */ + ipmmu_ctx_write(domain, IMBUSCR, + ipmmu_ctx_read(domain, IMBUSCR) & + ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK)); + + /* + * IMSTR + * Clear all interrupt flags. + */ + ipmmu_ctx_write(domain, IMSTR, ipmmu_ctx_read(domain, IMSTR)); + + /* + * IMCTR + * Enable the MMU and interrupt generation. The long-descriptor + * translation table format doesn't use TEX remapping. Don't enable AF + * software management as we have no use for it. 
Flush the TLB as + * required when modifying the context registers. + */ + ipmmu_ctx_write(domain, IMCTR, IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN); + + return 0; +} + +static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain) +{ + /* + * Disable the context. Flush the TLB as required when modifying the + * context registers. + * + * TODO: Is TLB flush really needed ? + */ + ipmmu_ctx_write(domain, IMCTR, IMCTR_FLUSH); + ipmmu_tlb_sync(domain); +} + +/* ----------------------------------------------------------------------------- + * Fault Handling + */ + +static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain) +{ + const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF; + struct ipmmu_vmsa_device *mmu = domain->mmu; + u32 status; + u32 iova; + + status = ipmmu_ctx_read(domain, IMSTR); + if (!(status & err_mask)) + return IRQ_NONE; + + iova = ipmmu_ctx_read(domain, IMEAR); + + /* + * Clear the error status flags. Unlike traditional interrupt flag + * registers that must be cleared by writing 1, this status register + * seems to require 0. The error address register must be read before, + * otherwise its value will be 0. + */ + ipmmu_ctx_write(domain, IMSTR, 0); + + /* Log fatal errors. */ + if (status & IMSTR_MHIT) + dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n", + iova); + if (status & IMSTR_ABORT) + dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n", + iova); + + if (!(status & (IMSTR_PF | IMSTR_TF))) + return IRQ_NONE; + + /* + * Try to handle page faults and translation faults. + * + * TODO: We need to look up the faulty device based on the I/O VA. Use + * the IOMMU device for now. + */ + if (!report_iommu_fault(domain->io_domain, mmu->dev, iova, 0)) + return IRQ_HANDLED; + + dev_err_ratelimited(mmu->dev, + "Unhandled fault: status 0x%08x iova 0x%08x\n", + status, iova); + + return IRQ_HANDLED; +} + +static irqreturn_t ipmmu_irq(int irq, void *dev) +{ + struct ipmmu_vmsa_device *mmu = dev; + struct iommu_domain *io_domain; + struct ipmmu_vmsa_domain *domain; + + if (!mmu->mapping) + return IRQ_NONE; + + io_domain = mmu->mapping->domain; + domain = io_domain->priv; + + return ipmmu_domain_irq(domain); +} + +/* ----------------------------------------------------------------------------- + * Page Table Management + */ + +#define pud_pgtable(pud) pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK)) + +static void ipmmu_free_ptes(pmd_t *pmd) +{ + pgtable_t table = pmd_pgtable(*pmd); + __free_page(table); +} + +static void ipmmu_free_pmds(pud_t *pud) +{ + pmd_t *pmd = pmd_offset(pud, 0); + pgtable_t table; + unsigned int i; + + for (i = 0; i < IPMMU_PTRS_PER_PMD; ++i) { + if (!pmd_table(*pmd)) + continue; + + ipmmu_free_ptes(pmd); + pmd++; + } + + table = pud_pgtable(*pud); + __free_page(table); +} + +static void ipmmu_free_pgtables(struct ipmmu_vmsa_domain *domain) +{ + pgd_t *pgd, *pgd_base = domain->pgd; + unsigned int i; + + /* + * Recursively free the page tables for this domain. We don't care about + * speculative TLB filling, because the TLB will be nuked next time this + * context bank is re-allocated and no devices currently map to these + * tables. + */ + pgd = pgd_base; + for (i = 0; i < IPMMU_PTRS_PER_PGD; ++i) { + if (pgd_none(*pgd)) + continue; + ipmmu_free_pmds((pud_t *)pgd); + pgd++; + } + + kfree(pgd_base); +} + +/* + * We can't use the (pgd|pud|pmd|pte)_populate or the set_(pgd|pud|pmd|pte) + * functions as they would flush the CPU TLB. 
+ */ + +static pte_t *ipmmu_alloc_pte(struct ipmmu_vmsa_device *mmu, pmd_t *pmd, + unsigned long iova) +{ + pte_t *pte; + + if (!pmd_none(*pmd)) + return pte_offset_kernel(pmd, iova); + + pte = (pte_t *)get_zeroed_page(GFP_ATOMIC); + if (!pte) + return NULL; + + ipmmu_flush_pgtable(mmu, pte, PAGE_SIZE); + *pmd = __pmd(__pa(pte) | PMD_NSTABLE | PMD_TYPE_TABLE); + ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd)); + + return pte + pte_index(iova); +} + +static pmd_t *ipmmu_alloc_pmd(struct ipmmu_vmsa_device *mmu, pgd_t *pgd, + unsigned long iova) +{ + pud_t *pud = (pud_t *)pgd; + pmd_t *pmd; + + if (!pud_none(*pud)) + return pmd_offset(pud, iova); + + pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC); + if (!pmd) + return NULL; + + ipmmu_flush_pgtable(mmu, pmd, PAGE_SIZE); + *pud = __pud(__pa(pmd) | PMD_NSTABLE | PMD_TYPE_TABLE); + ipmmu_flush_pgtable(mmu, pud, sizeof(*pud)); + + return pmd + pmd_index(iova); +} + +static u64 ipmmu_page_prot(unsigned int prot, u64 type) +{ + u64 pgprot = ARM_VMSA_PTE_XN | ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF + | ARM_VMSA_PTE_SH_IS | ARM_VMSA_PTE_AP_UNPRIV + | ARM_VMSA_PTE_NS | type; + + if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ)) + pgprot |= ARM_VMSA_PTE_AP_RDONLY; + + if (prot & IOMMU_CACHE) + pgprot |= IMMAIR_ATTR_IDX_WBRWA << ARM_VMSA_PTE_ATTRINDX_SHIFT; + + if (prot & IOMMU_EXEC) + pgprot &= ~ARM_VMSA_PTE_XN; + else if (!(prot & (IOMMU_READ | IOMMU_WRITE))) + /* If no access create a faulting entry to avoid TLB fills. */ + pgprot &= ~ARM_VMSA_PTE_PAGE; + + return pgprot; +} + +static int ipmmu_alloc_init_pte(struct ipmmu_vmsa_device *mmu, pmd_t *pmd, + unsigned long iova, unsigned long pfn, + size_t size, int prot) +{ + pteval_t pteval = ipmmu_page_prot(prot, ARM_VMSA_PTE_PAGE); + unsigned int num_ptes = 1; + pte_t *pte, *start; + unsigned int i; + + pte = ipmmu_alloc_pte(mmu, pmd, iova); + if (!pte) + return -ENOMEM; + + start = pte; + + /* + * Install the page table entries. We can be called both for a single + * page or for a block of 16 physically contiguous pages. In the latter + * case set the PTE contiguous hint. + */ + if (size == SZ_64K) { + pteval |= ARM_VMSA_PTE_CONT; + num_ptes = ARM_VMSA_PTE_CONT_ENTRIES; + } + + for (i = num_ptes; i; --i) + *pte++ = pfn_pte(pfn++, __pgprot(pteval)); + + ipmmu_flush_pgtable(mmu, start, sizeof(*pte) * num_ptes); + + return 0; +} + +static int ipmmu_alloc_init_pmd(struct ipmmu_vmsa_device *mmu, pmd_t *pmd, + unsigned long iova, unsigned long pfn, + int prot) +{ + pmdval_t pmdval = ipmmu_page_prot(prot, PMD_TYPE_SECT); + + *pmd = pfn_pmd(pfn, __pgprot(pmdval)); + ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd)); + + return 0; +} + +static int ipmmu_create_mapping(struct ipmmu_vmsa_domain *domain, + unsigned long iova, phys_addr_t paddr, + size_t size, int prot) +{ + struct ipmmu_vmsa_device *mmu = domain->mmu; + pgd_t *pgd = domain->pgd; + unsigned long flags; + unsigned long pfn; + pmd_t *pmd; + int ret; + + if (!pgd) + return -EINVAL; + + if (size & ~PAGE_MASK) + return -EINVAL; + + if (paddr & ~((1ULL << 40) - 1)) + return -ERANGE; + + pfn = __phys_to_pfn(paddr); + pgd += pgd_index(iova); + + /* Update the page tables. 
*/ + spin_lock_irqsave(&domain->lock, flags); + + pmd = ipmmu_alloc_pmd(mmu, pgd, iova); + if (!pmd) { + ret = -ENOMEM; + goto done; + } + + switch (size) { + case SZ_2M: + ret = ipmmu_alloc_init_pmd(mmu, pmd, iova, pfn, prot); + break; + case SZ_64K: + case SZ_4K: + ret = ipmmu_alloc_init_pte(mmu, pmd, iova, pfn, size, prot); + break; + default: + ret = -EINVAL; + break; + } + +done: + spin_unlock_irqrestore(&domain->lock, flags); + + if (!ret) + ipmmu_tlb_invalidate(domain); + + return ret; +} + +static void ipmmu_clear_pud(struct ipmmu_vmsa_device *mmu, pud_t *pud) +{ + /* Free the page table. */ + pgtable_t table = pud_pgtable(*pud); + __free_page(table); + + /* Clear the PUD. */ + *pud = __pud(0); + ipmmu_flush_pgtable(mmu, pud, sizeof(*pud)); +} + +static void ipmmu_clear_pmd(struct ipmmu_vmsa_device *mmu, pud_t *pud, + pmd_t *pmd) +{ + unsigned int i; + + /* Free the page table. */ + if (pmd_table(*pmd)) { + pgtable_t table = pmd_pgtable(*pmd); + __free_page(table); + } + + /* Clear the PMD. */ + *pmd = __pmd(0); + ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd)); + + /* Check whether the PUD is still needed. */ + pmd = pmd_offset(pud, 0); + for (i = 0; i < IPMMU_PTRS_PER_PMD; ++i) { + if (!pmd_none(pmd[i])) + return; + } + + /* Clear the parent PUD. */ + ipmmu_clear_pud(mmu, pud); +} + +static void ipmmu_clear_pte(struct ipmmu_vmsa_device *mmu, pud_t *pud, + pmd_t *pmd, pte_t *pte, unsigned int num_ptes) +{ + unsigned int i; + + /* Clear the PTE. */ + for (i = num_ptes; i; --i) + pte[i-1] = __pte(0); + + ipmmu_flush_pgtable(mmu, pte, sizeof(*pte) * num_ptes); + + /* Check whether the PMD is still needed. */ + pte = pte_offset_kernel(pmd, 0); + for (i = 0; i < IPMMU_PTRS_PER_PTE; ++i) { + if (!pte_none(pte[i])) + return; + } + + /* Clear the parent PMD. */ + ipmmu_clear_pmd(mmu, pud, pmd); +} + +static int ipmmu_split_pmd(struct ipmmu_vmsa_device *mmu, pmd_t *pmd) +{ + pte_t *pte, *start; + pteval_t pteval; + unsigned long pfn; + unsigned int i; + + pte = (pte_t *)get_zeroed_page(GFP_ATOMIC); + if (!pte) + return -ENOMEM; + + /* Copy the PMD attributes. */ + pteval = (pmd_val(*pmd) & ARM_VMSA_PTE_ATTRS_MASK) + | ARM_VMSA_PTE_CONT | ARM_VMSA_PTE_PAGE; + + pfn = pmd_pfn(*pmd); + start = pte; + + for (i = IPMMU_PTRS_PER_PTE; i; --i) + *pte++ = pfn_pte(pfn++, __pgprot(pteval)); + + ipmmu_flush_pgtable(mmu, start, PAGE_SIZE); + *pmd = __pmd(__pa(start) | PMD_NSTABLE | PMD_TYPE_TABLE); + ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd)); + + return 0; +} + +static void ipmmu_split_pte(struct ipmmu_vmsa_device *mmu, pte_t *pte) +{ + unsigned int i; + + for (i = ARM_VMSA_PTE_CONT_ENTRIES; i; --i) + pte[i-1] = __pte(pte_val(*pte) & ~ARM_VMSA_PTE_CONT); + + ipmmu_flush_pgtable(mmu, pte, sizeof(*pte) * ARM_VMSA_PTE_CONT_ENTRIES); +} + +static int ipmmu_clear_mapping(struct ipmmu_vmsa_domain *domain, + unsigned long iova, size_t size) +{ + struct ipmmu_vmsa_device *mmu = domain->mmu; + unsigned long flags; + pgd_t *pgd = domain->pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + int ret = 0; + + if (!pgd) + return -EINVAL; + + if (size & ~PAGE_MASK) + return -EINVAL; + + pgd += pgd_index(iova); + pud = (pud_t *)pgd; + + spin_lock_irqsave(&domain->lock, flags); + + /* If there's no PUD or PMD we're done. */ + if (pud_none(*pud)) + goto done; + + pmd = pmd_offset(pud, iova); + if (pmd_none(*pmd)) + goto done; + + /* + * When freeing a 2MB block just clear the PMD. In the unlikely case the + * block is mapped as individual pages this will free the corresponding + * PTE page table. 
+ */ + if (size == SZ_2M) { + ipmmu_clear_pmd(mmu, pud, pmd); + goto done; + } + + /* + * If the PMD has been mapped as a section remap it as pages to allow + * freeing individual pages. + */ + if (pmd_sect(*pmd)) + ipmmu_split_pmd(mmu, pmd); + + pte = pte_offset_kernel(pmd, iova); + + /* + * When freeing a 64kB block just clear the PTE entries. We don't have + * to care about the contiguous hint of the surrounding entries. + */ + if (size == SZ_64K) { + ipmmu_clear_pte(mmu, pud, pmd, pte, ARM_VMSA_PTE_CONT_ENTRIES); + goto done; + } + + /* + * If the PTE has been mapped with the contiguous hint set remap it and + * its surrounding PTEs to allow unmapping a single page. + */ + if (pte_val(*pte) & ARM_VMSA_PTE_CONT) + ipmmu_split_pte(mmu, pte); + + /* Clear the PTE. */ + ipmmu_clear_pte(mmu, pud, pmd, pte, 1); + +done: + spin_unlock_irqrestore(&domain->lock, flags); + + if (ret) + ipmmu_tlb_invalidate(domain); + + return 0; +} + +/* ----------------------------------------------------------------------------- + * IOMMU Operations + */ + +static int ipmmu_domain_init(struct iommu_domain *io_domain) +{ + struct ipmmu_vmsa_domain *domain; + + domain = kzalloc(sizeof(*domain), GFP_KERNEL); + if (!domain) + return -ENOMEM; + + spin_lock_init(&domain->lock); + + domain->pgd = kzalloc(IPMMU_PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL); + if (!domain->pgd) { + kfree(domain); + return -ENOMEM; + } + + io_domain->priv = domain; + domain->io_domain = io_domain; + + return 0; +} + +static void ipmmu_domain_destroy(struct iommu_domain *io_domain) +{ + struct ipmmu_vmsa_domain *domain = io_domain->priv; + + /* + * Free the domain resources. We assume that all devices have already + * been detached. + */ + ipmmu_domain_destroy_context(domain); + ipmmu_free_pgtables(domain); + kfree(domain); +} + +static int ipmmu_attach_device(struct iommu_domain *io_domain, + struct device *dev) +{ + struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu; + struct ipmmu_vmsa_device *mmu = archdata->mmu; + struct ipmmu_vmsa_domain *domain = io_domain->priv; + unsigned long flags; + int ret = 0; + + if (!mmu) { + dev_err(dev, "Cannot attach to IPMMU\n"); + return -ENXIO; + } + + spin_lock_irqsave(&domain->lock, flags); + + if (!domain->mmu) { + /* The domain hasn't been used yet, initialize it. */ + domain->mmu = mmu; + ret = ipmmu_domain_init_context(domain); + } else if (domain->mmu != mmu) { + /* + * Something is wrong, we can't attach two devices using + * different IOMMUs to the same domain. + */ + dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n", + dev_name(mmu->dev), dev_name(domain->mmu->dev)); + ret = -EINVAL; + } + + spin_unlock_irqrestore(&domain->lock, flags); + + if (ret < 0) + return ret; + + ipmmu_utlb_enable(domain, archdata->utlb); + + return 0; +} + +static void ipmmu_detach_device(struct iommu_domain *io_domain, + struct device *dev) +{ + struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu; + struct ipmmu_vmsa_domain *domain = io_domain->priv; + + ipmmu_utlb_disable(domain, archdata->utlb); + + /* + * TODO: Optimize by disabling the context when no device is attached. 
+ */ +} + +static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot) +{ + struct ipmmu_vmsa_domain *domain = io_domain->priv; + + if (!domain) + return -ENODEV; + + return ipmmu_create_mapping(domain, iova, paddr, size, prot); +} + +static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova, + size_t size) +{ + struct ipmmu_vmsa_domain *domain = io_domain->priv; + int ret; + + ret = ipmmu_clear_mapping(domain, iova, size); + return ret ? 0 : size; +} + +static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain, + dma_addr_t iova) +{ + struct ipmmu_vmsa_domain *domain = io_domain->priv; + pgd_t pgd; + pud_t pud; + pmd_t pmd; + pte_t pte; + + /* TODO: Is locking needed ? */ + + if (!domain->pgd) + return 0; + + pgd = *(domain->pgd + pgd_index(iova)); + if (pgd_none(pgd)) + return 0; + + pud = *pud_offset(&pgd, iova); + if (pud_none(pud)) + return 0; + + pmd = *pmd_offset(&pud, iova); + if (pmd_none(pmd)) + return 0; + + if (pmd_sect(pmd)) + return __pfn_to_phys(pmd_pfn(pmd)) | (iova & ~PMD_MASK); + + pte = *(pmd_page_vaddr(pmd) + pte_index(iova)); + if (pte_none(pte)) + return 0; + + return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK); +} + +static int ipmmu_find_utlb(struct ipmmu_vmsa_device *mmu, struct device *dev) +{ + const struct ipmmu_vmsa_master *master = mmu->pdata->masters; + const char *devname = dev_name(dev); + unsigned int i; + + for (i = 0; i < mmu->pdata->num_masters; ++i, ++master) { + if (strcmp(master->name, devname) == 0) + return master->utlb; + } + + return -1; +} + +static int ipmmu_add_device(struct device *dev) +{ + struct ipmmu_vmsa_archdata *archdata; + struct ipmmu_vmsa_device *mmu; + struct iommu_group *group; + int utlb = -1; + int ret; + + if (dev->archdata.iommu) { + dev_warn(dev, "IOMMU driver already assigned to device %s\n", + dev_name(dev)); + return -EINVAL; + } + + /* Find the master corresponding to the device. */ + spin_lock(&ipmmu_devices_lock); + + list_for_each_entry(mmu, &ipmmu_devices, list) { + utlb = ipmmu_find_utlb(mmu, dev); + if (utlb >= 0) { + /* + * TODO Take a reference to the MMU to protect + * against device removal. + */ + break; + } + } + + spin_unlock(&ipmmu_devices_lock); + + if (utlb < 0) + return -ENODEV; + + if (utlb >= mmu->num_utlbs) + return -EINVAL; + + /* Create a device group and add the device to it. */ + group = iommu_group_alloc(); + if (IS_ERR(group)) { + dev_err(dev, "Failed to allocate IOMMU group\n"); + return PTR_ERR(group); + } + + ret = iommu_group_add_device(group, dev); + iommu_group_put(group); + + if (ret < 0) { + dev_err(dev, "Failed to add device to IPMMU group\n"); + return ret; + } + + archdata = kzalloc(sizeof(*archdata), GFP_KERNEL); + if (!archdata) { + ret = -ENOMEM; + goto error; + } + + archdata->mmu = mmu; + archdata->utlb = utlb; + dev->archdata.iommu = archdata; + + /* + * Create the ARM mapping, used by the ARM DMA mapping core to allocate + * VAs. This will allocate a corresponding IOMMU domain. + * + * TODO: + * - Create one mapping per context (TLB). + * - Make the mapping size configurable ? We currently use a 2GB mapping + * at a 1GB offset to ensure that NULL VAs will fault. 
+ */ + if (!mmu->mapping) { + struct dma_iommu_mapping *mapping; + + mapping = arm_iommu_create_mapping(&platform_bus_type, + SZ_1G, SZ_2G); + if (IS_ERR(mapping)) { + dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n"); + return PTR_ERR(mapping); + } + + mmu->mapping = mapping; + } + + /* Attach the ARM VA mapping to the device. */ + ret = arm_iommu_attach_device(dev, mmu->mapping); + if (ret < 0) { + dev_err(dev, "Failed to attach device to VA mapping\n"); + goto error; + } + + return 0; + +error: + kfree(dev->archdata.iommu); + dev->archdata.iommu = NULL; + iommu_group_remove_device(dev); + return ret; +} + +static void ipmmu_remove_device(struct device *dev) +{ + arm_iommu_detach_device(dev); + iommu_group_remove_device(dev); + kfree(dev->archdata.iommu); + dev->archdata.iommu = NULL; +} + +static struct iommu_ops ipmmu_ops = { + .domain_init = ipmmu_domain_init, + .domain_destroy = ipmmu_domain_destroy, + .attach_dev = ipmmu_attach_device, + .detach_dev = ipmmu_detach_device, + .map = ipmmu_map, + .unmap = ipmmu_unmap, + .iova_to_phys = ipmmu_iova_to_phys, + .add_device = ipmmu_add_device, + .remove_device = ipmmu_remove_device, + .pgsize_bitmap = SZ_2M | SZ_64K | SZ_4K, +}; + +/* ----------------------------------------------------------------------------- + * Probe/remove and init + */ + +static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu) +{ + unsigned int i; + + /* Disable all contexts. */ + for (i = 0; i < 4; ++i) + ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0); +} + +static int ipmmu_probe(struct platform_device *pdev) +{ + struct ipmmu_vmsa_device *mmu; + struct resource *res; + int irq; + int ret; + + if (!pdev->dev.platform_data) { + dev_err(&pdev->dev, "missing platform data\n"); + return -EINVAL; + } + + mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL); + if (!mmu) { + dev_err(&pdev->dev, "cannot allocate device data\n"); + return -ENOMEM; + } + + mmu->dev = &pdev->dev; + mmu->pdata = pdev->dev.platform_data; + mmu->num_utlbs = 32; + + /* Map I/O memory and request IRQ. */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + mmu->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(mmu->base)) + return PTR_ERR(mmu->base); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "no IRQ found\n"); + return irq; + } + + ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0, + dev_name(&pdev->dev), mmu); + if (ret < 0) { + dev_err(&pdev->dev, "failed to request IRQ %d\n", irq); + return irq; + } + + ipmmu_device_reset(mmu); + + /* + * We can't create the ARM mapping here as it requires the bus to have + * an IOMMU, which only happens when bus_set_iommu() is called in + * ipmmu_init() after the probe function returns. 
+ */ + + spin_lock(&ipmmu_devices_lock); + list_add(&mmu->list, &ipmmu_devices); + spin_unlock(&ipmmu_devices_lock); + + platform_set_drvdata(pdev, mmu); + + return 0; +} + +static int ipmmu_remove(struct platform_device *pdev) +{ + struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev); + + spin_lock(&ipmmu_devices_lock); + list_del(&mmu->list); + spin_unlock(&ipmmu_devices_lock); + + arm_iommu_release_mapping(mmu->mapping); + + ipmmu_device_reset(mmu); + + return 0; +} + +static struct platform_driver ipmmu_driver = { + .driver = { + .owner = THIS_MODULE, + .name = "ipmmu-vmsa", + }, + .probe = ipmmu_probe, + .remove = ipmmu_remove, +}; + +static int __init ipmmu_init(void) +{ + int ret; + + ret = platform_driver_register(&ipmmu_driver); + if (ret < 0) + return ret; + + if (!iommu_present(&platform_bus_type)) + bus_set_iommu(&platform_bus_type, &ipmmu_ops); + + return 0; +} + +static void __exit ipmmu_exit(void) +{ + return platform_driver_unregister(&ipmmu_driver); +} + +subsys_initcall(ipmmu_init); +module_exit(ipmmu_exit); + +MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU"); +MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iommu/msm_iommu_dev.c b/drivers/iommu/msm_iommu_dev.c index 08ba4972da9..61def7cb526 100644 --- a/drivers/iommu/msm_iommu_dev.c +++ b/drivers/iommu/msm_iommu_dev.c @@ -127,13 +127,12 @@ static void msm_iommu_reset(void __iomem *base, int ncb) static int msm_iommu_probe(struct platform_device *pdev) { - struct resource *r, *r2; + struct resource *r; struct clk *iommu_clk; struct clk *iommu_pclk; struct msm_iommu_drvdata *drvdata; struct msm_iommu_dev *iommu_dev = pdev->dev.platform_data; void __iomem *regs_base; - resource_size_t len; int ret, irq, par; if (pdev->id == -1) { @@ -178,35 +177,16 @@ static int msm_iommu_probe(struct platform_device *pdev) iommu_clk = NULL; r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "physbase"); - - if (!r) { - ret = -ENODEV; - goto fail_clk; - } - - len = resource_size(r); - - r2 = request_mem_region(r->start, len, r->name); - if (!r2) { - pr_err("Could not request memory region: start=%p, len=%d\n", - (void *) r->start, len); - ret = -EBUSY; + regs_base = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(regs_base)) { + ret = PTR_ERR(regs_base); goto fail_clk; } - regs_base = ioremap(r2->start, len); - - if (!regs_base) { - pr_err("Could not ioremap: start=%p, len=%d\n", - (void *) r2->start, len); - ret = -EBUSY; - goto fail_mem; - } - irq = platform_get_irq_byname(pdev, "secure_irq"); if (irq < 0) { ret = -ENODEV; - goto fail_io; + goto fail_clk; } msm_iommu_reset(regs_base, iommu_dev->ncb); @@ -222,14 +202,14 @@ static int msm_iommu_probe(struct platform_device *pdev) if (!par) { pr_err("%s: Invalid PAR value detected\n", iommu_dev->name); ret = -ENODEV; - goto fail_io; + goto fail_clk; } ret = request_irq(irq, msm_iommu_fault_handler, 0, "msm_iommu_secure_irpt_handler", drvdata); if (ret) { pr_err("Request IRQ %d failed with ret=%d\n", irq, ret); - goto fail_io; + goto fail_clk; } @@ -250,10 +230,6 @@ static int msm_iommu_probe(struct platform_device *pdev) clk_disable(iommu_pclk); return 0; -fail_io: - iounmap(regs_base); -fail_mem: - release_mem_region(r->start, len); fail_clk: if (iommu_clk) { clk_disable(iommu_clk); diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 7fcbfc498fa..895af06a667 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -34,6 +34,9 @@ #include 
"omap-iopgtable.h" #include "omap-iommu.h" +#define to_iommu(dev) \ + ((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev))) + #define for_each_iotlb_cr(obj, n, __i, cr) \ for (__i = 0; \ (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true); \ @@ -391,6 +394,7 @@ static void flush_iotlb_page(struct omap_iommu *obj, u32 da) __func__, start, da, bytes); iotlb_load_cr(obj, &cr); iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY); + break; } } pm_runtime_put_sync(obj->dev); @@ -1037,19 +1041,18 @@ static void iopte_cachep_ctor(void *iopte) clean_dcache_area(iopte, IOPTE_TABLE_SIZE); } -static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, - u32 flags) +static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz) { memset(e, 0, sizeof(*e)); e->da = da; e->pa = pa; - e->valid = 1; + e->valid = MMU_CAM_V; /* FIXME: add OMAP1 support */ - e->pgsz = flags & MMU_CAM_PGSZ_MASK; - e->endian = flags & MMU_RAM_ENDIAN_MASK; - e->elsz = flags & MMU_RAM_ELSZ_MASK; - e->mixed = flags & MMU_RAM_MIXED_MASK; + e->pgsz = pgsz; + e->endian = MMU_RAM_ENDIAN_LITTLE; + e->elsz = MMU_RAM_ELSZ_8; + e->mixed = 0; return iopgsz_to_bytes(e->pgsz); } @@ -1062,9 +1065,8 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da, struct device *dev = oiommu->dev; struct iotlb_entry e; int omap_pgsz; - u32 ret, flags; + u32 ret; - /* we only support mapping a single iommu page for now */ omap_pgsz = bytes_to_iopgsz(bytes); if (omap_pgsz < 0) { dev_err(dev, "invalid size to map: %d\n", bytes); @@ -1073,9 +1075,7 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da, dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes); - flags = omap_pgsz | prot; - - iotlb_init_entry(&e, da, pa, flags); + iotlb_init_entry(&e, da, pa, omap_pgsz); ret = omap_iopgtable_store_entry(oiommu, &e); if (ret) @@ -1248,12 +1248,6 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain, return ret; } -static int omap_iommu_domain_has_cap(struct iommu_domain *domain, - unsigned long cap) -{ - return 0; -} - static int omap_iommu_add_device(struct device *dev) { struct omap_iommu_arch_data *arch_data; @@ -1305,7 +1299,6 @@ static struct iommu_ops omap_iommu_ops = { .map = omap_iommu_map, .unmap = omap_iommu_unmap, .iova_to_phys = omap_iommu_iova_to_phys, - .domain_has_cap = omap_iommu_domain_has_cap, .add_device = omap_iommu_add_device, .remove_device = omap_iommu_remove_device, .pgsize_bitmap = OMAP_IOMMU_PGSIZES, diff --git a/drivers/iommu/omap-iopgtable.h b/drivers/iommu/omap-iopgtable.h index b6f9a51746c..f891683e3f0 100644 --- a/drivers/iommu/omap-iopgtable.h +++ b/drivers/iommu/omap-iopgtable.h @@ -93,6 +93,3 @@ static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask) /* to find an entry in the second-level page table. 
*/ #define iopte_index(da) (((da) >> IOPTE_SHIFT) & (PTRS_PER_IOPTE - 1)) #define iopte_offset(iopgd, da) (iopgd_page_vaddr(iopgd) + iopte_index(da)) - -#define to_iommu(dev) \ - (platform_get_drvdata(to_platform_device(dev))) diff --git a/drivers/iommu/shmobile-ipmmu.c b/drivers/iommu/shmobile-ipmmu.c index e3bc2e19b6d..bd97adecb1f 100644 --- a/drivers/iommu/shmobile-ipmmu.c +++ b/drivers/iommu/shmobile-ipmmu.c @@ -94,11 +94,6 @@ static int ipmmu_probe(struct platform_device *pdev) struct resource *res; struct shmobile_ipmmu_platform_data *pdata = pdev->dev.platform_data; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "cannot get platform resources\n"); - return -ENOENT; - } ipmmu = devm_kzalloc(&pdev->dev, sizeof(*ipmmu), GFP_KERNEL); if (!ipmmu) { dev_err(&pdev->dev, "cannot allocate device data\n"); @@ -106,19 +101,18 @@ static int ipmmu_probe(struct platform_device *pdev) } spin_lock_init(&ipmmu->flush_lock); ipmmu->dev = &pdev->dev; - ipmmu->ipmmu_base = devm_ioremap_nocache(&pdev->dev, res->start, - resource_size(res)); - if (!ipmmu->ipmmu_base) { - dev_err(&pdev->dev, "ioremap_nocache failed\n"); - return -ENOMEM; - } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ipmmu->ipmmu_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ipmmu->ipmmu_base)) + return PTR_ERR(ipmmu->ipmmu_base); + ipmmu->dev_names = pdata->dev_names; ipmmu->num_dev_names = pdata->num_dev_names; platform_set_drvdata(pdev, ipmmu); ipmmu_reg_write(ipmmu, IMCTR1, 0x0); /* disable TLB */ ipmmu_reg_write(ipmmu, IMCTR2, 0x0); /* disable PMB */ - ipmmu_iommu_init(ipmmu); - return 0; + return ipmmu_iommu_init(ipmmu); } static struct platform_driver ipmmu_driver = { |