author     Joerg Roedel <joerg.roedel@amd.com>    2009-09-03 16:34:23 +0200
committer  Joerg Roedel <joerg.roedel@amd.com>    2009-09-03 16:34:23 +0200
commit     03362a05c55122baff3556109c922285299dfec4 (patch)
tree       85f83fbf4e1e8027eca05fbd8285041053a713c4 /arch/x86/kernel/amd_iommu.c
parent     85da07c409daba3d067824f0051d58f70cb571a0 (diff)
parent     4751a95134e05f1172131d2001c6991d671fa58c (diff)
Merge branch 'amd-iommu/passthrough' into amd-iommu/2.6.32
Conflicts:
	arch/x86/kernel/amd_iommu.c
	arch/x86/kernel/amd_iommu_init.c
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--  arch/x86/kernel/amd_iommu.c | 141
1 file changed, 123 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 8c93b7c7735..dc19ed43b54 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -41,6 +41,13 @@ static DEFINE_RWLOCK(amd_iommu_devtable_lock);
static LIST_HEAD(iommu_pd_list);
static DEFINE_SPINLOCK(iommu_pd_list_lock);
+/*
+ * Domain for untranslated devices - only allocated
+ * if iommu=pt passed on kernel cmd line.
+ */
+static struct protection_domain *pt_domain;
+
+#ifdef CONFIG_IOMMU_API
static struct iommu_ops amd_iommu_ops;
/*
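The pt_domain comment above refers to the iommu=pt kernel command line option. The option parsing itself is not part of this file; the sketch below (names and placement are assumptions, the real handling lives in the existing iommu= option parser, not in this patch) only illustrates how such an option would end up in the iommu_pass_through flag that the new code tests:

/* Illustrative sketch only (not from this patch): how "iommu=pt" could be
 * turned into the iommu_pass_through flag used below. The real parsing is
 * done by the existing iommu= option handler. */
static int __init iommu_pt_setup_sketch(char *str)
{
	while (str && *str) {
		if (!strncmp(str, "pt", 2))
			iommu_pass_through = 1;
		str = strchr(str, ',');
		if (str)
			str++;
	}
	return 0;
}
early_param("iommu", iommu_pt_setup_sketch);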
@@ -1130,32 +1137,48 @@ static struct protection_domain *domain_for_device(u16 devid)
* If a device is not yet associated with a domain, this function
* assigns it to the domain and makes it visible to the hardware
*/
-static void attach_device(struct amd_iommu *iommu,
- struct protection_domain *domain,
- u16 devid)
+static void __attach_device(struct amd_iommu *iommu,
+ struct protection_domain *domain,
+ u16 devid)
{
- unsigned long flags;
- u64 pte_root = virt_to_phys(domain->pt_root);
+ u64 pte_root;
- domain->dev_cnt += 1;
+ /* lock domain */
+ spin_lock(&domain->lock);
+
+ pte_root = virt_to_phys(domain->pt_root);
pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
<< DEV_ENTRY_MODE_SHIFT;
pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
- write_lock_irqsave(&amd_iommu_devtable_lock, flags);
- amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
- amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
amd_iommu_dev_table[devid].data[2] = domain->id;
+ amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
+ amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
amd_iommu_pd_table[devid] = domain;
+
+ domain->dev_cnt += 1;
+
+ /* ready */
+ spin_unlock(&domain->lock);
+}
+
+static void attach_device(struct amd_iommu *iommu,
+ struct protection_domain *domain,
+ u16 devid)
+{
+ unsigned long flags;
+
+ write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+ __attach_device(iommu, domain, devid);
write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
- /*
- * We might boot into a crash-kernel here. The crashed kernel
- * left the caches in the IOMMU dirty. So we have to flush
- * here to evict all dirty stuff.
- */
+ /*
+ * We might boot into a crash-kernel here. The crashed kernel
+ * left the caches in the IOMMU dirty. So we have to flush
+ * here to evict all dirty stuff.
+ */
iommu_queue_inv_dev_entry(iommu, devid);
iommu_flush_tlb_pde(iommu, domain->id);
}
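After this split there are two lock levels: attach_device() still takes amd_iommu_devtable_lock with interrupts disabled, while __attach_device() only nests domain->lock inside it. The hypothetical caller below (a sketch, not part of the patch) shows the nesting any new user of __attach_device() has to respect, unless, like the passthrough initialization at the end of this patch, it runs before any concurrent access to the tables is possible:

/* Hypothetical caller, shown only to illustrate the lock nesting:
 * amd_iommu_devtable_lock (outer, irq-safe) -> domain->lock (inner,
 * taken inside __attach_device()). */
static void attach_with_locking_sketch(struct amd_iommu *iommu,
				       struct protection_domain *domain,
				       u16 devid)
{
	unsigned long flags;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	__attach_device(iommu, domain, devid);	/* takes domain->lock */
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
}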
@@ -1182,6 +1205,15 @@ static void __detach_device(struct protection_domain *domain, u16 devid)
/* ready */
spin_unlock(&domain->lock);
+
+ /*
+ * If we run in passthrough mode, the device must be assigned to the
+ * passthrough domain when it is detached from any other domain
+ */
+ if (iommu_pass_through) {
+ struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+ __attach_device(iommu, pt_domain, devid);
+ }
}
/*
@@ -1227,6 +1259,8 @@ static int device_change_notifier(struct notifier_block *nb,
case BUS_NOTIFY_UNBOUND_DRIVER:
if (!domain)
goto out;
+ if (iommu_pass_through)
+ break;
detach_device(domain, devid);
break;
case BUS_NOTIFY_ADD_DEVICE:
@@ -2051,19 +2085,47 @@ static void cleanup_domain(struct protection_domain *domain)
write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
}
-static int amd_iommu_domain_init(struct iommu_domain *dom)
+static void protection_domain_free(struct protection_domain *domain)
+{
+ if (!domain)
+ return;
+
+ if (domain->id)
+ domain_id_free(domain->id);
+
+ kfree(domain);
+}
+
+static struct protection_domain *protection_domain_alloc(void)
{
struct protection_domain *domain;
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
- return -ENOMEM;
+ return NULL;
spin_lock_init(&domain->lock);
- domain->mode = PAGE_MODE_3_LEVEL;
domain->id = domain_id_alloc();
if (!domain->id)
+ goto out_err;
+
+ return domain;
+
+out_err:
+ kfree(domain);
+
+ return NULL;
+}
+
+static int amd_iommu_domain_init(struct iommu_domain *dom)
+{
+ struct protection_domain *domain;
+
+ domain = protection_domain_alloc();
+ if (!domain)
goto out_free;
+
+ domain->mode = PAGE_MODE_3_LEVEL;
domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
if (!domain->pt_root)
goto out_free;
@@ -2073,7 +2135,7 @@ static int amd_iommu_domain_init(struct iommu_domain *dom)
return 0;
out_free:
- kfree(domain);
+ protection_domain_free(domain);
return -ENOMEM;
}
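The protection_domain_alloc()/protection_domain_free() pair extracted above is what lets the passthrough code below create a domain without going through the IOMMU-API entry point. A hypothetical caller (function name made up; the steps simply mirror amd_iommu_domain_init() above) would pair them like this:

/* Hypothetical helper, only to illustrate the alloc/free pairing;
 * amd_iommu_domain_init() above is the real user. */
static struct protection_domain *example_domain_create(void)
{
	struct protection_domain *domain;

	domain = protection_domain_alloc();
	if (!domain)
		return NULL;

	domain->mode    = PAGE_MODE_3_LEVEL;
	domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
	if (!domain->pt_root) {
		protection_domain_free(domain);
		return NULL;
	}

	return domain;
}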
@@ -2254,3 +2316,46 @@ static struct iommu_ops amd_iommu_ops = {
.domain_has_cap = amd_iommu_domain_has_cap,
};
+/*****************************************************************************
+ *
+ * The following functions do a basic initialization of the IOMMU for
+ * pass through mode
+ *
+ * In passthrough mode the IOMMU is initialized and enabled but not used for
+ * DMA-API translation.
+ *
+ *****************************************************************************/
+
+int __init amd_iommu_init_passthrough(void)
+{
+ struct pci_dev *dev = NULL;
+ u16 devid, devid2;
+
+ /* allocate passthrough domain */
+ pt_domain = protection_domain_alloc();
+ if (!pt_domain)
+ return -ENOMEM;
+
+ pt_domain->mode |= PAGE_MODE_NONE;
+
+ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+ struct amd_iommu *iommu;
+
+ devid = calc_devid(dev->bus->number, dev->devfn);
+ if (devid > amd_iommu_last_bdf)
+ continue;
+
+ devid2 = amd_iommu_alias_table[devid];
+
+ iommu = amd_iommu_rlookup_table[devid2];
+ if (!iommu)
+ continue;
+
+ __attach_device(iommu, pt_domain, devid);
+ __attach_device(iommu, pt_domain, devid2);
+ }
+
+ pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
+
+ return 0;
+}
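amd_iommu_init_passthrough() is expected to be called from the init code in arch/x86/kernel/amd_iommu_init.c, the other file named in the merge conflicts, which is not part of this diff. The fragment below is only a sketch (an assumption about the call site) of how the init path would choose between passthrough and the regular DMA-API setup done by amd_iommu_init_dma_ops():

	/* Sketch (assumption, call site not shown in this diff): inside
	 * amd_iommu_init() in arch/x86/kernel/amd_iommu_init.c. */
	if (iommu_pass_through)
		ret = amd_iommu_init_passthrough();
	else
		ret = amd_iommu_init_dma_ops();
	if (ret)
		goto free;	/* error label name is an assumption */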