author    David Woodhouse <David.Woodhouse@intel.com>    2009-07-01 19:21:24 +0100
committer David Woodhouse <David.Woodhouse@intel.com>    2009-07-01 19:21:24 +0100
commit    c85994e4771025ef2a66533eb1a4c6c2217b9cda
tree      9f827d5587a5cac2c8a412317e2997b8add67b9a    /drivers/pci/intel-iommu.c
parent    3238c0c4d68d9a9022b411a11a4b933fbdb53a14
intel-iommu: Ensure that PTE writes are 64-bit atomic, even on i386
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
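
On i386 a plain 64-bit store is issued as two 32-bit stores, and the IOMMU hardware walks these page tables concurrently with the CPU, so it can observe a half-written PTE. cmpxchg64() compiles to the locked CMPXCHG8B instruction, which reads and writes all 8 bytes as a single atomic operation. Below is a minimal user-space sketch of the publish step, using GCC's __atomic builtin as a stand-in for the kernel's cmpxchg64() (pte_install and dma_pte_demo are illustrative names, not driver symbols):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for a VT-d page-table entry. */
struct dma_pte_demo {
	uint64_t val;
};

/*
 * Publish a 64-bit PTE value only if the slot is still empty.
 * Returns the previous contents: zero on success, the competing
 * value if another CPU got there first.  On i386 GCC lowers this
 * builtin to CMPXCHG8B, so the 8-byte write can never be torn.
 */
static uint64_t pte_install(struct dma_pte_demo *pte, uint64_t pteval)
{
	uint64_t expected = 0;

	__atomic_compare_exchange_n(&pte->val, &expected, pteval, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return expected;	/* holds the old value when the swap fails */
}

int main(void)
{
	struct dma_pte_demo pte = { 0 };

	if (pte_install(&pte, 0x1000ULL | 3) == 0)
		printf("installed PTE %#llx\n", (unsigned long long)pte.val);
	return 0;
}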
Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--  drivers/pci/intel-iommu.c  37
1 file changed, 23 insertions(+), 14 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index ec7e032d5ab..eea1006c860 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -222,7 +222,12 @@ static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
 
 static inline u64 dma_pte_addr(struct dma_pte *pte)
 {
-	return (pte->val & VTD_PAGE_MASK);
+#ifdef CONFIG_64BIT
+	return pte->val & VTD_PAGE_MASK;
+#else
+	/* Must have a full atomic 64-bit read */
+	return __cmpxchg64(pte, 0ULL, 0ULL) & VTD_PAGE_MASK;
+#endif
 }
 
 static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
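
The #else branch is a read disguised as a compare-and-swap: with old and new both zero, __cmpxchg64() either swaps 0 for 0 (a no-op) or fails the compare, and in both cases it returns the current 64-bit contents, fetched atomically by CMPXCHG8B rather than as two 32-bit loads. A hedged user-space rendering of the same trick, using GCC's __sync builtin in place of the kernel's __cmpxchg64():

#include <stdint.h>

/*
 * Atomic 64-bit read on 32-bit x86.  A compare-and-swap with
 * old == new == 0 either writes 0 over 0 (harmless) or fails,
 * and either way returns the full 8-byte value in one locked
 * CMPXCHG8B, so the read cannot be torn.
 */
static inline uint64_t read64_atomic(uint64_t *p)
{
	return __sync_val_compare_and_swap(p, (uint64_t)0, (uint64_t)0);
}

The cost is that even a pure read performs a locked write cycle and dirties the cache line, which is why the CONFIG_64BIT build keeps the plain load.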
@@ -712,6 +717,8 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 			break;
 
 		if (!dma_pte_present(pte)) {
+			uint64_t pteval;
+
 			tmp_page = alloc_pgtable_page();
 
 			if (!tmp_page) {
@@ -719,15 +726,15 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 					flags);
 				return NULL;
 			}
-			domain_flush_cache(domain, tmp_page, PAGE_SIZE);
-			dma_set_pte_pfn(pte, virt_to_dma_pfn(tmp_page));
-			/*
-			 * high level table always sets r/w, last level page
-			 * table control read/write
-			 */
-			dma_set_pte_readable(pte);
-			dma_set_pte_writable(pte);
-			domain_flush_cache(domain, pte, sizeof(*pte));
+			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
+			pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
+			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
+				/* Someone else set it while we were thinking; use theirs. */
+				free_pgtable_page(tmp_page);
+			} else {
+				dma_pte_addr(pte);
+				domain_flush_cache(domain, pte, sizeof(*pte));
+			}
 		}
 		parent = phys_to_virt(dma_pte_addr(pte));
 		level--;
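
The replacement code grows the page table lock-free: the new table page is fully built off to the side, then published with a single cmpxchg64(). Zero back means we won and must flush the PTE; non-zero means another CPU populated the slot first, so we free our page and walk through theirs. A standalone sketch of that install-or-discard pattern (alloc_page_demo() and the DEMO_* flag names are stand-ins, not driver symbols):

#include <stdint.h>
#include <stdlib.h>

struct pte_demo { uint64_t val; };

#define DEMO_PTE_READ	1ULL
#define DEMO_PTE_WRITE	2ULL
#define DEMO_PTE_MASK	(~3ULL)

/* Stand-in for alloc_pgtable_page(): one zeroed 4KiB table. */
static void *alloc_page_demo(void)
{
	return calloc(1, 4096);
}

/*
 * Install a freshly allocated table page under 'pte', tolerating
 * a racing installer.  Exactly one cmpxchg wins; the loser frees
 * its page and both callers continue through the winning entry.
 */
static void *install_table_page(struct pte_demo *pte)
{
	uint64_t old = 0, pteval;
	void *page = alloc_page_demo();

	if (!page)
		return NULL;

	pteval = (uint64_t)(uintptr_t)page | DEMO_PTE_READ | DEMO_PTE_WRITE;
	if (!__atomic_compare_exchange_n(&pte->val, &old, pteval, 0,
					 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
		free(page);	/* lost the race; use the winner's page */
		return (void *)(uintptr_t)(old & DEMO_PTE_MASK);
	}
	return page;
}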
@@ -1666,6 +1673,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 	}
 
 	while (nr_pages--) {
+		uint64_t tmp;
+
 		if (!sg_res) {
 			sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
 			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
@@ -1680,17 +1689,17 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 		/* We don't need lock here, nobody else
 		 * touches the iova range
 		 */
-		if (unlikely(dma_pte_addr(pte))) {
+		tmp = cmpxchg64(&pte->val, 0ULL, pteval);
+		if (tmp) {
 			static int dumps = 5;
-			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx)\n",
-			       iov_pfn, pte->val);
+			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
+			       iov_pfn, tmp, (unsigned long long)pteval);
 			if (dumps) {
 				dumps--;
 				debug_dma_dump_mappings(NULL);
 			}
 			WARN_ON(1);
 		}
-		pte->val = pteval;
 		pte++;
 		if (!nr_pages ||
 		    (unsigned long)pte >> VTD_PAGE_SHIFT !=
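
Here the old read-check-store sequence becomes one cmpxchg64(): the same atomic operation performs the store, detects a slot that was already populated, and hands back the value that got there first, so the diagnostic can print both the found and the intended PTE. A compressed sketch of that shape (fprintf stands in for printk; the names are illustrative):

#include <stdint.h>
#include <stdio.h>

struct pte_demo { uint64_t val; };

/*
 * Install 'pteval' into what should be an empty PTE slot.  One
 * atomic cmpxchg both performs the store and detects a collision,
 * so the error path reports exactly which value beat us there.
 */
static int pte_set_or_report(struct pte_demo *pte, uint64_t pteval,
			     unsigned long iov_pfn)
{
	uint64_t tmp = 0;

	if (__atomic_compare_exchange_n(&pte->val, &tmp, pteval, 0,
					__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
		return 0;

	fprintf(stderr,
		"ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
		iov_pfn, (unsigned long long)tmp, (unsigned long long)pteval);
	return -1;
}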