Diffstat (limited to 'drivers/xen/swiotlb-xen.c')
-rw-r--r--   drivers/xen/swiotlb-xen.c   19
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index ebd8f218a78..810ad419e34 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -96,8 +96,6 @@ static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
 	dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT;
 	phys_addr_t paddr = dma;

-	BUG_ON(paddr != dma); /* truncation has occurred, should never happen */
-
 	paddr |= baddr & ~PAGE_MASK;

 	return paddr;
@@ -399,11 +397,13 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	 * buffering it.
 	 */
 	if (dma_capable(dev, dev_addr, size) &&
-	    !range_straddles_page_boundary(phys, size) && !swiotlb_force) {
+	    !range_straddles_page_boundary(phys, size) &&
+	    !xen_arch_need_swiotlb(dev, PFN_DOWN(phys), PFN_DOWN(dev_addr)) &&
+	    !swiotlb_force) {
 		/* we are not interested in the dma_addr returned by
 		 * xen_dma_map_page, only in the potential cache flushes executed
 		 * by the function. */
-		xen_dma_map_page(dev, page, offset, size, dir, attrs);
+		xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
 		return dev_addr;
 	}
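For readers who want to poke at the fast-path predicate above, here is a self-contained toy model of the decision. The three helpers are hypothetical stand-ins, not the kernel's real implementations (which take a struct device and live in the DMA and Xen layers); only the shape of the test mirrors the hunk:

/* Toy model of the swiotlb-xen fast-path test; stubs are hypothetical. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* stub: can the device address this buffer directly? (e.g. 32-bit DMA mask) */
static bool dma_capable(uint64_t dev_addr, size_t size)
{
	return dev_addr + size - 1 <= 0xffffffffULL;
}

/* stub: a contiguous pseudo-physical range may be discontiguous in
 * machine memory once it crosses a page boundary */
static bool range_straddles_page_boundary(uint64_t phys, size_t size)
{
	return (phys & (PAGE_SIZE - 1)) + size > PAGE_SIZE;
}

/* stub for the new arch hook: assume a page needs bouncing when its
 * pseudo-physical frame and bus frame disagree */
static bool xen_arch_need_swiotlb(uint64_t pfn, uint64_t bfn)
{
	return pfn != bfn;
}

int main(void)
{
	uint64_t phys = 0x1000, dev_addr = 0x1000;
	size_t size = 512;
	bool swiotlb_force = false;

	if (dma_capable(dev_addr, size) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(phys >> PAGE_SHIFT, dev_addr >> PAGE_SHIFT) &&
	    !swiotlb_force)
		printf("fast path: device can DMA directly at %#llx\n",
		       (unsigned long long)dev_addr);
	else
		printf("slow path: bounce through the swiotlb\n");
	return 0;
}

All four conditions must hold for the direct path; the patch's new arch hook simply adds one more veto before the bounce buffer is skipped.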
@@ -417,7 +417,7 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 		return DMA_ERROR_CODE;

 	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
-					map & ~PAGE_MASK, size, dir, attrs);
+					dev_addr, map & ~PAGE_MASK, size, dir, attrs);
 	dev_addr = xen_phys_to_bus(map);

 	/*
@@ -447,7 +447,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
 	BUG_ON(dir == DMA_NONE);

-	xen_dma_unmap_page(hwdev, paddr, size, dir, attrs);
+	xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);

 	/* NOTE: We use dev_addr here, not paddr! */
 	if (is_xen_swiotlb_buffer(dev_addr)) {
@@ -495,14 +495,14 @@ xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 	BUG_ON(dir == DMA_NONE);

 	if (target == SYNC_FOR_CPU)
-		xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir);
+		xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);

 	/* NOTE: We use dev_addr here, not paddr! */
 	if (is_xen_swiotlb_buffer(dev_addr))
 		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);

 	if (target == SYNC_FOR_DEVICE)
-		xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir);
+		xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);

 	if (dir != DMA_FROM_DEVICE)
 		return;
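Note that the sync path now juggles two addresses: paddr (the CPU-side pseudo-physical address, which swiotlb_tbl_sync_single keeps using to index the bounce buffer) and dev_addr (the bus address the device actually sees, which the arch cache hooks now take). A toy model of the two views, assuming a hypothetical fixed frame offset where real Xen consults a per-page p2m table:

/* Toy model of the phys/bus split; the fixed offset is illustrative. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define BFN_OFFSET 0x100	/* hypothetical machine-frame offset */

static uint64_t xen_phys_to_bus(uint64_t paddr)
{
	uint64_t bfn = (paddr >> PAGE_SHIFT) + BFN_OFFSET;
	return (bfn << PAGE_SHIFT) | (paddr & ((1UL << PAGE_SHIFT) - 1));
}

static uint64_t xen_bus_to_phys(uint64_t baddr)
{
	uint64_t pfn = (baddr >> PAGE_SHIFT) - BFN_OFFSET;
	return (pfn << PAGE_SHIFT) | (baddr & ((1UL << PAGE_SHIFT) - 1));
}

int main(void)
{
	uint64_t paddr = 0x2abc;
	uint64_t dev_addr = xen_phys_to_bus(paddr);

	/* cache maintenance is keyed on what the device sees ... */
	printf("dev_addr = %#llx\n", (unsigned long long)dev_addr);
	/* ... bounce-buffer bookkeeping on what the CPU sees */
	printf("paddr    = %#llx\n",
	       (unsigned long long)xen_bus_to_phys(dev_addr));
	return 0;
}

Passing dev_addr to the xen_dma_* hooks matters on ARM, where the maintenance must target the frame the device addresses rather than the guest's pseudo-physical one. The hunk also fixes the SYNC_FOR_DEVICE branch, which previously called the for_cpu variant.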
@@ -557,6 +557,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 		dma_addr_t dev_addr = xen_phys_to_bus(paddr);

 		if (swiotlb_force ||
+		    xen_arch_need_swiotlb(hwdev, PFN_DOWN(paddr), PFN_DOWN(dev_addr)) ||
 		    !dma_capable(hwdev, dev_addr, sg->length) ||
 		    range_straddles_page_boundary(paddr, sg->length)) {
 			phys_addr_t map = swiotlb_tbl_map_single(hwdev,
@@ -574,6 +575,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 				return 0;
 			}
 			xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
+						dev_addr,
 						map & ~PAGE_MASK,
 						sg->length,
 						dir,
@@ -584,6 +586,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 		 * xen_dma_map_page, only in the potential cache flushes executed
 		 * by the function. */
 		xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
+					dev_addr,
 					paddr & ~PAGE_MASK,
 					sg->length,
 					dir,
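The diff relies on each architecture providing xen_arch_need_swiotlb(); the companion arch changes are not part of this file, so the following is a sketch of a plausible shape rather than the committed code:

/* Illustrative sketch only; the real implementations live in the
 * arch trees, not in drivers/xen/swiotlb-xen.c. */
#include <stdbool.h>

struct device;

#if defined(CONFIG_X86)
/* x86: the dma_capable/range_straddles checks above are assumed to be
 * sufficient on their own, so the hook can simply opt out. */
static inline bool xen_arch_need_swiotlb(struct device *dev,
					 unsigned long pfn,
					 unsigned long mfn)
{
	return false;
}
#else /* e.g. ARM */
/* A page whose pseudo-physical frame differs from its machine frame is
 * foreign (for instance a grant mapping); cache maintenance by pfn
 * cannot reach it, so it is forced through the bounce buffer. */
static inline bool xen_arch_need_swiotlb(struct device *dev,
					 unsigned long pfn,
					 unsigned long mfn)
{
	return pfn != mfn;
}
#endif

Keeping the hook per-architecture lets the common swiotlb-xen code stay unchanged while each port decides, frame by frame, whether direct DMA is safe.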