Diffstat (limited to 'drivers/iommu/intel-iommu.c')
 drivers/iommu/intel-iommu.c | 104
 1 file changed, 82 insertions(+), 22 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index bdc447fd476..c9c6053198d 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -41,6 +41,7 @@
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
+#include <linux/memblock.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
@@ -78,6 +79,24 @@
#define LEVEL_STRIDE (9)
#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
+/*
+ * This bitmap is used to advertise the page sizes our hardware supports
+ * to the IOMMU core, which will then use this information to split
+ * physically contiguous memory regions it is mapping into page sizes
+ * that we support.
+ *
+ * Traditionally the IOMMU core just handed us the mappings directly,
+ * after making sure the size was a power-of-two multiple of 4KiB and
+ * that the mapping had natural alignment.
+ *
+ * To retain this behavior, we currently advertise that we support
+ * all page sizes that are power-of-two multiples of 4KiB.
+ *
+ * If at some point we'd like to utilize the IOMMU core's new behavior,
+ * we could change this to advertise the real page sizes we support.
+ */
+#define INTEL_IOMMU_PGSIZES (~0xFFFUL)
+
static inline int agaw_to_level(int agaw)
{
return agaw + 2;
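
For context, the sketch below (illustrative, not part of this patch; modeled on the generic IOMMU core's splitting logic) shows how a pgsize_bitmap like INTEL_IOMMU_PGSIZES is consumed: the core picks the largest advertised page size that both addresses are naturally aligned to and that still fits the remaining length, then repeats until the region is fully mapped.

/*
 * Illustrative sketch only: choose the largest page size advertised in
 * 'pgsize_bitmap' that satisfies the alignment of both iova and paddr
 * and does not exceed the remaining 'size'.
 */
static size_t pick_pgsize(unsigned long pgsize_bitmap,
			  unsigned long iova, phys_addr_t paddr, size_t size)
{
	unsigned long addr_merge = iova | paddr;
	unsigned int pgsize_idx = __fls(size);	/* largest 2^idx <= size */
	unsigned long pgsizes;

	/* a low set bit in either address caps the usable page size */
	if (addr_merge)
		pgsize_idx = min_t(unsigned int, pgsize_idx,
				   __ffs(addr_merge));

	/* mask of all small-enough sizes, minus unsupported ones */
	pgsizes = ((1UL << (pgsize_idx + 1)) - 1) & pgsize_bitmap;

	return pgsizes ? 1UL << __fls(pgsizes) : 0;
}

With INTEL_IOMMU_PGSIZES == ~0xFFFUL, every power-of-two size from 4KiB up is advertised, so in practice the core hands this driver the same naturally aligned chunks it always did.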
@@ -2188,18 +2207,6 @@ static inline void iommu_prepare_isa(void)
static int md_domain_init(struct dmar_domain *domain, int guest_width);
-static int __init si_domain_work_fn(unsigned long start_pfn,
- unsigned long end_pfn, void *datax)
-{
- int *ret = datax;
-
- *ret = iommu_domain_identity_map(si_domain,
- (uint64_t)start_pfn << PAGE_SHIFT,
- (uint64_t)end_pfn << PAGE_SHIFT);
- return *ret;
-
-}
-
static int __init si_domain_init(int hw)
{
struct dmar_drhd_unit *drhd;
@@ -2231,9 +2238,15 @@ static int __init si_domain_init(int hw)
return 0;
for_each_online_node(nid) {
- work_with_active_regions(nid, si_domain_work_fn, &ret);
- if (ret)
- return ret;
+ unsigned long start_pfn, end_pfn;
+ int i;
+
+ for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
+ ret = iommu_domain_identity_map(si_domain,
+ PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
+ if (ret)
+ return ret;
+ }
}
return 0;
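
The replacement iterator comes straight from memblock (hence the new <linux/memblock.h> include): for_each_mem_pfn_range() walks each registered memory range with pages on the node and returns PFN bounds directly, instead of going through the old work_with_active_regions() callback. PFN_PHYS() then performs exactly the conversion the deleted helper open-coded; for reference, its definition from <linux/pfn.h>:

/* pfn -> physical address; the cast mirrors the removed (uint64_t) one */
#define PFN_PHYS(x)	((phys_addr_t)(x) << PAGE_SHIFT)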
@@ -3984,12 +3997,11 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
static int intel_iommu_map(struct iommu_domain *domain,
unsigned long iova, phys_addr_t hpa,
- int gfp_order, int iommu_prot)
+ size_t size, int iommu_prot)
{
struct dmar_domain *dmar_domain = domain->priv;
u64 max_addr;
int prot = 0;
- size_t size;
int ret;
if (iommu_prot & IOMMU_READ)
@@ -3999,7 +4011,6 @@ static int intel_iommu_map(struct iommu_domain *domain,
if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
prot |= DMA_PTE_SNP;
- size = PAGE_SIZE << gfp_order;
max_addr = iova + size;
if (dmar_domain->max_addr < max_addr) {
u64 end;
@@ -4022,11 +4033,10 @@ static int intel_iommu_map(struct iommu_domain *domain,
return ret;
}
-static int intel_iommu_unmap(struct iommu_domain *domain,
- unsigned long iova, int gfp_order)
+static size_t intel_iommu_unmap(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
{
struct dmar_domain *dmar_domain = domain->priv;
- size_t size = PAGE_SIZE << gfp_order;
int order;
order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
@@ -4035,7 +4045,7 @@ static int intel_iommu_unmap(struct iommu_domain *domain,
if (dmar_domain->max_addr == iova + size)
dmar_domain->max_addr = iova;
- return order;
+ return PAGE_SIZE << order;
}
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
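
For callers, the conversion between the two interfaces is mechanical; a before/after sketch (the 'len' and 'prot' names here are illustrative):

/* old order-based API: the byte count was implied as PAGE_SIZE << order */
ret = iommu_map(domain, iova, paddr, get_order(len), prot);

/* new size-based API: plain bytes, which lets the core split the region
 * according to the driver's pgsize_bitmap */
ret = iommu_map(domain, iova, paddr, len, prot);

On the unmap side, the driver callback now reports progress in bytes rather than as an order, which is why the function above returns PAGE_SIZE << order instead of the bare order.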
@@ -4065,6 +4075,54 @@ static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
return 0;
}
+/*
+ * Group numbers are arbitrary. Devices with the same group number
+ * indicate the IOMMU cannot differentiate between them. To avoid
+ * tracking used groups we just use the seg|bus|devfn of the lowest
+ * level at which we're able to differentiate devices.
+static int intel_iommu_device_group(struct device *dev, unsigned int *groupid)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_dev *bridge;
+ union {
+ struct {
+ u8 devfn;
+ u8 bus;
+ u16 segment;
+ } pci;
+ u32 group;
+ } id;
+
+ if (iommu_no_mapping(dev))
+ return -ENODEV;
+
+ id.pci.segment = pci_domain_nr(pdev->bus);
+ id.pci.bus = pdev->bus->number;
+ id.pci.devfn = pdev->devfn;
+
+ if (!device_to_iommu(id.pci.segment, id.pci.bus, id.pci.devfn))
+ return -ENODEV;
+
+ bridge = pci_find_upstream_pcie_bridge(pdev);
+ if (bridge) {
+ if (pci_is_pcie(bridge)) {
+ id.pci.bus = bridge->subordinate->number;
+ id.pci.devfn = 0;
+ } else {
+ id.pci.bus = bridge->bus->number;
+ id.pci.devfn = bridge->devfn;
+ }
+ }
+
+ if (!pdev->is_virtfn && iommu_group_mf)
+ id.pci.devfn = PCI_DEVFN(PCI_SLOT(id.pci.devfn), 0);
+
+ *groupid = id.group;
+
+ return 0;
+}
+
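
The union builds a stable 32-bit ID without manual shifting; on a little-endian layout it is equivalent to packing the fields explicitly, and since group numbers are arbitrary, only uniqueness of the result matters, not its numeric value:

/* little-endian equivalent of the union above (sketch) */
u32 groupid = ((u32)segment << 16) | ((u32)bus << 8) | devfn;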
static struct iommu_ops intel_iommu_ops = {
.domain_init = intel_iommu_domain_init,
.domain_destroy = intel_iommu_domain_destroy,
@@ -4074,6 +4132,8 @@ static struct iommu_ops intel_iommu_ops = {
.unmap = intel_iommu_unmap,
.iova_to_phys = intel_iommu_iova_to_phys,
.domain_has_cap = intel_iommu_domain_has_cap,
+ .device_group = intel_iommu_device_group,
+ .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
};
static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)