author    Linus Torvalds <torvalds@linux-foundation.org>  2013-11-14 07:55:21 +0900
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-11-14 07:55:21 +0900
commit    8ceafbfa91ffbdbb2afaea5c24ccb519ffb8b587 (patch)
tree      98c9ea93362536f1ddd73175b13b7847583350df
parent    42a2d923cc349583ebf6fdd52a7d35e1c2f7e6bd (diff)
parent    26ba47b18318abe7dadbe9294a611c0e932651d8 (diff)
Merge branch 'for-linus-dma-masks' of git://git.linaro.org/people/rmk/linux-arm
Pull DMA mask updates from Russell King:
 "This series cleans up the handling of DMA masks in a lot of drivers,
  fixing some bugs as we go.

  Some of the more serious errors include:
   - drivers which only set their coherent DMA mask if the attempt to
     set the streaming mask fails.
   - drivers which test for a NULL dma mask pointer, and then set the
     dma mask pointer to a location in their module .data section -
     which will cause problems if the module is reloaded.

  To counter these, I have introduced two helper functions:
   - dma_set_mask_and_coherent() takes care of setting both the
     streaming and coherent masks at the same time, with the correct
     error handling as specified by the API.
   - dma_coerce_mask_and_coherent() which resolves the problem of
     drivers forcefully setting DMA masks.  This is more a marker for
     future work to further clean these locations up - the code which
     creates the devices really should be initialising these, but to
     fix that in one go along with this change could potentially be
     very disruptive.

  The last thing this series does is prise away some of Linux's
  addiction to "DMA addresses are physical addresses and RAM always
  starts at zero".  We have ARM LPAE systems where all system memory is
  above 4GB physical, hence having DMA masks interpreted by (eg) the
  block layers as describing physical addresses in the range
  0..DMAMASK fails on these platforms.  Santosh Shilimkar addresses
  this in this series; the patches were copied to the appropriate
  people multiple times but were ignored.

  Fixing this also gets rid of some ARM weirdness in the setup of the
  max*pfn variables, and brings ARM into line with every other Linux
  architecture as far as those go"

* 'for-linus-dma-masks' of git://git.linaro.org/people/rmk/linux-arm: (52 commits)
  ARM: 7805/1: mm: change max*pfn to include the physical offset of memory
  ARM: 7797/1: mmc: Use dma_max_pfn(dev) helper for bounce_limit calculations
  ARM: 7796/1: scsi: Use dma_max_pfn(dev) helper for bounce_limit calculations
  ARM: 7795/1: mm: dma-mapping: Add dma_max_pfn(dev) helper function
  ARM: 7794/1: block: Rename parameter dma_mask to max_addr for blk_queue_bounce_limit()
  ARM: DMA-API: better handing of DMA masks for coherent allocations
  ARM: 7857/1: dma: imx-sdma: setup dma mask
  DMA-API: firmware/google/gsmi.c: avoid direct access to DMA masks
  DMA-API: dcdbas: update DMA mask handing
  DMA-API: dma: edma.c: no need to explicitly initialize DMA masks
  DMA-API: usb: musb: use platform_device_register_full() to avoid directly messing with dma masks
  DMA-API: crypto: remove last references to 'static struct device *dev'
  DMA-API: crypto: fix ixp4xx crypto platform device support
  DMA-API: others: use dma_set_coherent_mask()
  DMA-API: staging: use dma_set_coherent_mask()
  DMA-API: usb: use new dma_coerce_mask_and_coherent()
  DMA-API: usb: use dma_set_coherent_mask()
  DMA-API: parport: parport_pc.c: use dma_coerce_mask_and_coherent()
  DMA-API: net: octeon: use dma_coerce_mask_and_coherent()
  DMA-API: net: nxp/lpc_eth: use dma_coerce_mask_and_coherent()
  ...
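For context, the pattern the series converts the PCI drivers below (e1000e,
igb, ixgbe and friends) to looks roughly like the following sketch.  It is
illustrative only: mydev_probe() is a hypothetical probe function, not code
taken from any of the hunks in this merge.

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	/* Hypothetical driver probe, for illustration only */
	static int mydev_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		int using_dac = 1;
		int err;

		/* Set the streaming and coherent masks together; prefer 64-bit (DAC) */
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (err) {
			/* Fall back to a 32-bit mask if the platform cannot do 64-bit DMA */
			using_dac = 0;
			err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
				return err;
			}
		}

		/* rest of the probe would use using_dac to decide on highmem DMA etc. */
		return 0;
	}

The single helper call replaces the old dma_set_mask() + dma_set_coherent_mask()
pair, so the coherent mask can no longer be silently left at its default when
the streaming mask succeeds.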
-rw-r--r--  Documentation/DMA-API-HOWTO.txt | 37
-rw-r--r--  Documentation/DMA-API.txt | 8
-rw-r--r--  arch/arm/include/asm/dma-mapping.h | 8
-rw-r--r--  arch/arm/mm/dma-mapping.c | 51
-rw-r--r--  arch/arm/mm/init.c | 12
-rw-r--r--  arch/arm/mm/mm.h | 2
-rw-r--r--  arch/powerpc/kernel/vio.c | 3
-rw-r--r--  block/blk-settings.c | 8
-rw-r--r--  drivers/amba/bus.c | 6
-rw-r--r--  drivers/ata/pata_ixp4xx_cf.c | 5
-rw-r--r--  drivers/ata/pata_octeon_cf.c | 5
-rw-r--r--  drivers/block/nvme-core.c | 10
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 48
-rw-r--r--  drivers/dma/amba-pl08x.c | 5
-rw-r--r--  drivers/dma/dw/platform.c | 8
-rw-r--r--  drivers/dma/edma.c | 10
-rw-r--r--  drivers/dma/imx-sdma.c | 4
-rw-r--r--  drivers/dma/pl330.c | 4
-rw-r--r--  drivers/firmware/dcdbas.c | 32
-rw-r--r--  drivers/firmware/google/gsmi.c | 13
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 5
-rw-r--r--  drivers/media/platform/omap3isp/isp.c | 6
-rw-r--r--  drivers/media/platform/omap3isp/isp.h | 3
-rw-r--r--  drivers/mmc/card/queue.c | 3
-rw-r--r--  drivers/mmc/host/sdhci-acpi.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 8
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 13
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 12
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 9
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 18
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 18
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 18
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c | 16
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 15
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 15
-rw-r--r--  drivers/net/ethernet/nxp/lpc_eth.c | 6
-rw-r--r--  drivers/net/ethernet/octeon/octeon_mgmt.c | 5
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 12
-rw-r--r--  drivers/net/wireless/b43/dma.c | 9
-rw-r--r--  drivers/net/wireless/b43legacy/dma.c | 9
-rw-r--r--  drivers/of/platform.c | 3
-rw-r--r--  drivers/parport/parport_pc.c | 8
-rw-r--r--  drivers/scsi/scsi_lib.c | 2
-rw-r--r--  drivers/staging/dwc2/platform.c | 5
-rw-r--r--  drivers/staging/et131x/et131x.c | 17
-rw-r--r--  drivers/staging/imx-drm/imx-drm-core.c | 8
-rw-r--r--  drivers/staging/imx-drm/ipuv3-crtc.c | 4
-rw-r--r--  drivers/staging/media/dt3155v4l/dt3155v4l.c | 5
-rw-r--r--  drivers/usb/chipidea/ci_hdrc_imx.c | 7
-rw-r--r--  drivers/usb/dwc3/dwc3-exynos.c | 7
-rw-r--r--  drivers/usb/gadget/lpc32xx_udc.c | 4
-rw-r--r--  drivers/usb/host/bcma-hcd.c | 3
-rw-r--r--  drivers/usb/host/ehci-atmel.c | 7
-rw-r--r--  drivers/usb/host/ehci-exynos.c | 7
-rw-r--r--  drivers/usb/host/ehci-octeon.c | 4
-rw-r--r--  drivers/usb/host/ehci-omap.c | 10
-rw-r--r--  drivers/usb/host/ehci-orion.c | 7
-rw-r--r--  drivers/usb/host/ehci-platform.c | 10
-rw-r--r--  drivers/usb/host/ehci-spear.c | 7
-rw-r--r--  drivers/usb/host/ehci-tegra.c | 7
-rw-r--r--  drivers/usb/host/ohci-at91.c | 9
-rw-r--r--  drivers/usb/host/ohci-exynos.c | 7
-rw-r--r--  drivers/usb/host/ohci-nxp.c | 5
-rw-r--r--  drivers/usb/host/ohci-octeon.c | 5
-rw-r--r--  drivers/usb/host/ohci-omap3.c | 10
-rw-r--r--  drivers/usb/host/ohci-pxa27x.c | 8
-rw-r--r--  drivers/usb/host/ohci-sa1111.c | 6
-rw-r--r--  drivers/usb/host/ohci-spear.c | 7
-rw-r--r--  drivers/usb/host/ssb-hcd.c | 3
-rw-r--r--  drivers/usb/host/uhci-platform.c | 7
-rw-r--r--  drivers/video/amba-clcd.c | 5
-rw-r--r--  include/linux/amba/bus.h | 2
-rw-r--r--  include/linux/dma-mapping.h | 31
-rw-r--r--  sound/arm/pxa2xx-pcm.c | 10
-rw-r--r--  sound/soc/atmel/atmel-pcm.c | 11
-rw-r--r--  sound/soc/blackfin/bf5xx-ac97-pcm.c | 11
-rw-r--r--  sound/soc/blackfin/bf5xx-i2s-pcm.c | 10
-rw-r--r--  sound/soc/davinci/davinci-pcm.c | 9
-rw-r--r--  sound/soc/fsl/fsl_dma.c | 9
-rw-r--r--  sound/soc/fsl/imx-pcm-fiq.c | 12
-rw-r--r--  sound/soc/fsl/mpc5200_dma.c | 10
-rw-r--r--  sound/soc/jz4740/jz4740-pcm.c | 12
-rw-r--r--  sound/soc/kirkwood/kirkwood-dma.c | 9
-rw-r--r--  sound/soc/nuc900/nuc900-pcm.c | 9
-rw-r--r--  sound/soc/omap/omap-pcm.c | 11
-rw-r--r--  sound/soc/pxa/pxa2xx-pcm.c | 11
-rw-r--r--  sound/soc/s6000/s6000-pcm.c | 9
-rw-r--r--  sound/soc/samsung/dma.c | 11
-rw-r--r--  sound/soc/samsung/idma.c | 11
91 files changed, 447 insertions(+), 448 deletions(-)
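The second helper, dma_coerce_mask_and_coherent(), is what the platform-device
hunks below (pata_octeon_cf, imx-sdma, the EHCI/OHCI glue drivers, parport_pc,
and others) switch to in place of open-coded assignments such as
"dev->dma_mask = &dev->coherent_dma_mask".  A minimal sketch of its use,
with myplat_probe() again being a hypothetical name rather than code from
this merge:

	#include <linux/dma-mapping.h>
	#include <linux/platform_device.h>

	/* Hypothetical platform driver probe, for illustration only */
	static int myplat_probe(struct platform_device *pdev)
	{
		int ret;

		/*
		 * Point dev->dma_mask at dev->coherent_dma_mask and set both
		 * to 32 bits, checking the result instead of writing the
		 * masks directly.
		 */
		ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret)
			return ret;

		/* remainder of the probe would go here */
		return 0;
	}

As the pull message notes, this is a stop-gap: the code that creates these
devices should really be initialising the masks itself.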
diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt
index 14129f149a7..5e983031cc1 100644
--- a/Documentation/DMA-API-HOWTO.txt
+++ b/Documentation/DMA-API-HOWTO.txt
@@ -101,14 +101,23 @@ style to do this even if your device holds the default setting,
because this shows that you did think about these issues wrt. your
device.
-The query is performed via a call to dma_set_mask():
+The query is performed via a call to dma_set_mask_and_coherent():
- int dma_set_mask(struct device *dev, u64 mask);
+ int dma_set_mask_and_coherent(struct device *dev, u64 mask);
-The query for consistent allocations is performed via a call to
-dma_set_coherent_mask():
+which will query the mask for both streaming and coherent APIs together.
+If you have some special requirements, then the following two separate
+queries can be used instead:
- int dma_set_coherent_mask(struct device *dev, u64 mask);
+ The query for streaming mappings is performed via a call to
+ dma_set_mask():
+
+ int dma_set_mask(struct device *dev, u64 mask);
+
+ The query for consistent allocations is performed via a call
+ to dma_set_coherent_mask():
+
+ int dma_set_coherent_mask(struct device *dev, u64 mask);
Here, dev is a pointer to the device struct of your device, and mask
is a bit mask describing which bits of an address your device
@@ -137,7 +146,7 @@ exactly why.
The standard 32-bit addressing device would do something like this:
- if (dma_set_mask(dev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
printk(KERN_WARNING
"mydev: No suitable DMA available.\n");
goto ignore_this_device;
@@ -171,22 +180,20 @@ the case would look like this:
int using_dac, consistent_using_dac;
- if (!dma_set_mask(dev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
using_dac = 1;
consistent_using_dac = 1;
- dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
- } else if (!dma_set_mask(dev, DMA_BIT_MASK(32))) {
+ } else if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
using_dac = 0;
consistent_using_dac = 0;
- dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
} else {
printk(KERN_WARNING
"mydev: No suitable DMA available.\n");
goto ignore_this_device;
}
-dma_set_coherent_mask() will always be able to set the same or a
-smaller mask as dma_set_mask(). However for the rare case that a
+The coherent mask will always be able to set the same or a
+smaller mask as the streaming mask. However for the rare case that a
device driver only uses consistent allocations, one would have to
check the return value from dma_set_coherent_mask().
@@ -199,9 +206,9 @@ address you might do something like:
goto ignore_this_device;
}
-When dma_set_mask() is successful, and returns zero, the kernel saves
-away this mask you have provided. The kernel will use this
-information later when you make DMA mappings.
+When dma_set_mask() or dma_set_mask_and_coherent() is successful, and
+returns zero, the kernel saves away this mask you have provided. The
+kernel will use this information later when you make DMA mappings.
There is a case which we are aware of at this time, which is worth
mentioning in this documentation. If your device supports multiple
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index 78a6c569d20..e865279cec5 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -142,6 +142,14 @@ internal API for use by the platform than an external API for use by
driver writers.
int
+dma_set_mask_and_coherent(struct device *dev, u64 mask)
+
+Checks to see if the mask is possible and updates the device
+streaming and coherent DMA mask parameters if it is.
+
+Returns: 0 if successful and a negative error if not.
+
+int
dma_set_mask(struct device *dev, u64 mask)
Checks to see if the mask is possible and updates the device
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 5b579b95150..863cd84eb1a 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -64,6 +64,7 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
+
#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
@@ -86,6 +87,13 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
}
#endif
+/* The ARM override for dma_max_pfn() */
+static inline unsigned long dma_max_pfn(struct device *dev)
+{
+ return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask);
+}
+#define dma_max_pfn(dev) dma_max_pfn(dev)
+
/*
* DMA errors are defined by all-bits-set in the DMA address.
*/
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1272ed202dd..644d91f73b0 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -159,7 +159,7 @@ EXPORT_SYMBOL(arm_coherent_dma_ops);
static u64 get_coherent_dma_mask(struct device *dev)
{
- u64 mask = (u64)arm_dma_limit;
+ u64 mask = (u64)DMA_BIT_MASK(32);
if (dev) {
mask = dev->coherent_dma_mask;
@@ -173,10 +173,30 @@ static u64 get_coherent_dma_mask(struct device *dev)
return 0;
}
- if ((~mask) & (u64)arm_dma_limit) {
- dev_warn(dev, "coherent DMA mask %#llx is smaller "
- "than system GFP_DMA mask %#llx\n",
- mask, (u64)arm_dma_limit);
+ /*
+ * If the mask allows for more memory than we can address,
+ * and we actually have that much memory, then fail the
+ * allocation.
+ */
+ if (sizeof(mask) != sizeof(dma_addr_t) &&
+ mask > (dma_addr_t)~0 &&
+ dma_to_pfn(dev, ~0) > arm_dma_pfn_limit) {
+ dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
+ mask);
+ dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
+ return 0;
+ }
+
+ /*
+ * Now check that the mask, when translated to a PFN,
+ * fits within the allowable addresses which we can
+ * allocate.
+ */
+ if (dma_to_pfn(dev, mask) < arm_dma_pfn_limit) {
+ dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
+ mask,
+ dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
+ arm_dma_pfn_limit + 1);
return 0;
}
}
@@ -1007,8 +1027,27 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
*/
int dma_supported(struct device *dev, u64 mask)
{
- if (mask < (u64)arm_dma_limit)
+ unsigned long limit;
+
+ /*
+ * If the mask allows for more memory than we can address,
+ * and we actually have that much memory, then we must
+ * indicate that DMA to this device is not supported.
+ */
+ if (sizeof(mask) != sizeof(dma_addr_t) &&
+ mask > (dma_addr_t)~0 &&
+ dma_to_pfn(dev, ~0) > arm_dma_pfn_limit)
+ return 0;
+
+ /*
+ * Translate the device's DMA mask to a PFN limit. This
+ * PFN number includes the page which we can DMA to.
+ */
+ limit = dma_to_pfn(dev, mask);
+
+ if (limit < arm_dma_pfn_limit)
return 0;
+
return 1;
}
EXPORT_SYMBOL(dma_supported);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index ca907f805c5..3e8f106ee5f 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -209,6 +209,7 @@ EXPORT_SYMBOL(arm_dma_zone_size);
* so a successful GFP_DMA allocation will always satisfy this.
*/
phys_addr_t arm_dma_limit;
+unsigned long arm_dma_pfn_limit;
static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
unsigned long dma_size)
@@ -231,6 +232,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc)
arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
} else
arm_dma_limit = 0xffffffff;
+ arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}
@@ -418,12 +420,10 @@ void __init bootmem_init(void)
* This doesn't seem to be used by the Linux memory manager any
* more, but is used by ll_rw_block. If we can get rid of it, we
* also get rid of some of the stuff above as well.
- *
- * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
- * the system, not the maximum PFN.
*/
- max_low_pfn = max_low - PHYS_PFN_OFFSET;
- max_pfn = max_high - PHYS_PFN_OFFSET;
+ min_low_pfn = min;
+ max_low_pfn = max_low;
+ max_pfn = max_high;
}
/*
@@ -529,7 +529,7 @@ static inline void free_area_high(unsigned long pfn, unsigned long end)
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
- unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
+ unsigned long max_low = max_low_pfn;
struct memblock_region *mem, *res;
/* set highmem page free */
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index d5a4e9ad8f0..d5a982d15a8 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -81,8 +81,10 @@ extern __init void add_static_vm_early(struct static_vm *svm);
#ifdef CONFIG_ZONE_DMA
extern phys_addr_t arm_dma_limit;
+extern unsigned long arm_dma_pfn_limit;
#else
#define arm_dma_limit ((phys_addr_t)~0)
+#define arm_dma_pfn_limit (~0ul >> PAGE_SHIFT)
#endif
extern phys_addr_t arm_lowmem_limit;
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index f99cefbd84e..e7d0c88f621 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1419,8 +1419,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
/* needed to ensure proper operation of coherent allocations
* later, in case driver doesn't set it explicitly */
- dma_set_mask(&viodev->dev, DMA_BIT_MASK(64));
- dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64));
+ dma_set_mask_and_coherent(&viodev->dev, DMA_BIT_MASK(64));
}
/* register with generic device framework */
diff --git a/block/blk-settings.c b/block/blk-settings.c
index c50ecf0ea3b..026c1517505 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -195,17 +195,17 @@ EXPORT_SYMBOL(blk_queue_make_request);
/**
* blk_queue_bounce_limit - set bounce buffer limit for queue
* @q: the request queue for the device
- * @dma_mask: the maximum address the device can handle
+ * @max_addr: the maximum address the device can handle
*
* Description:
* Different hardware can have different requirements as to what pages
* it can do I/O directly to. A low level driver can call
* blk_queue_bounce_limit to have lower memory pages allocated as bounce
- * buffers for doing I/O to pages residing above @dma_mask.
+ * buffers for doing I/O to pages residing above @max_addr.
**/
-void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
+void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
{
- unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
+ unsigned long b_pfn = max_addr >> PAGE_SHIFT;
int dma = 0;
q->bounce_gfp = GFP_NOIO;
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index c6707278a6b..c4876ac9151 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -552,7 +552,6 @@ amba_aphb_device_add(struct device *parent, const char *name,
if (!dev)
return ERR_PTR(-ENOMEM);
- dev->dma_mask = dma_mask;
dev->dev.coherent_dma_mask = dma_mask;
dev->irq[0] = irq1;
dev->irq[1] = irq2;
@@ -619,7 +618,7 @@ static void amba_device_initialize(struct amba_device *dev, const char *name)
dev_set_name(&dev->dev, "%s", name);
dev->dev.release = amba_device_release;
dev->dev.bus = &amba_bustype;
- dev->dev.dma_mask = &dev->dma_mask;
+ dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
dev->res.name = dev_name(&dev->dev);
}
@@ -663,9 +662,6 @@ int amba_device_register(struct amba_device *dev, struct resource *parent)
amba_device_initialize(dev, dev->dev.init_name);
dev->dev.init_name = NULL;
- if (!dev->dev.coherent_dma_mask && dev->dma_mask)
- dev_warn(&dev->dev, "coherent dma mask is unset\n");
-
return amba_device_add(dev, parent);
}
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index 1ec53f8ca96..ddf470c2341 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -144,6 +144,7 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
struct ata_host *host;
struct ata_port *ap;
struct ixp4xx_pata_data *data = dev_get_platdata(&pdev->dev);
+ int ret;
cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -157,7 +158,9 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
return -ENOMEM;
/* acquire resources and fill host */
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
data->cs0 = devm_ioremap(&pdev->dev, cs0->start, 0x1000);
data->cs1 = devm_ioremap(&pdev->dev, cs1->start, 0x1000);
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index c51bbb9ea8e..83c4ddb1bc7 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -1014,8 +1014,9 @@ static int octeon_cf_probe(struct platform_device *pdev)
}
cf_port->c0 = ap->ioaddr.ctl_addr;
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ rv = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rv)
+ return rv;
ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr);
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index da52092980e..26d03fa0bf2 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1949,12 +1949,9 @@ static int nvme_dev_map(struct nvme_dev *dev)
if (pci_request_selected_regions(pdev, bars, "nvme"))
goto disable_pci;
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- else
- goto disable_pci;
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+ goto disable;
pci_set_drvdata(pdev, dev);
dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
@@ -2168,6 +2165,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&dev->namespaces);
dev->pci_dev = pdev;
+
result = nvme_set_instance(dev);
if (result)
goto free;
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 21180d6cad6..214357e12dc 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -218,23 +218,9 @@ static dma_addr_t crypt_phys;
static int support_aes = 1;
-static void dev_release(struct device *dev)
-{
- return;
-}
-
#define DRIVER_NAME "ixp4xx_crypto"
-static struct platform_device pseudo_dev = {
- .name = DRIVER_NAME,
- .id = 0,
- .num_resources = 0,
- .dev = {
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .release = dev_release,
- }
-};
-static struct device *dev = &pseudo_dev.dev;
+static struct platform_device *pdev;
static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
@@ -263,6 +249,7 @@ static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
static int setup_crypt_desc(void)
{
+ struct device *dev = &pdev->dev;
BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
crypt_virt = dma_alloc_coherent(dev,
NPE_QLEN * sizeof(struct crypt_ctl),
@@ -363,6 +350,7 @@ static void finish_scattered_hmac(struct crypt_ctl *crypt)
static void one_packet(dma_addr_t phys)
{
+ struct device *dev = &pdev->dev;
struct crypt_ctl *crypt;
struct ixp_ctx *ctx;
int failed;
@@ -432,7 +420,7 @@ static void crypto_done_action(unsigned long arg)
tasklet_schedule(&crypto_done_tasklet);
}
-static int init_ixp_crypto(void)
+static int init_ixp_crypto(struct device *dev)
{
int ret = -ENODEV;
u32 msg[2] = { 0, 0 };
@@ -519,7 +507,7 @@ err:
return ret;
}
-static void release_ixp_crypto(void)
+static void release_ixp_crypto(struct device *dev)
{
qmgr_disable_irq(RECV_QID);
tasklet_kill(&crypto_done_tasklet);
@@ -886,6 +874,7 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
struct buffer_desc src_hook;
+ struct device *dev = &pdev->dev;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
@@ -1010,6 +999,7 @@ static int aead_perform(struct aead_request *req, int encrypt,
unsigned int cryptlen;
struct buffer_desc *buf, src_hook;
struct aead_ctx *req_ctx = aead_request_ctx(req);
+ struct device *dev = &pdev->dev;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
@@ -1418,20 +1408,30 @@ static struct ixp_alg ixp4xx_algos[] = {
} };
#define IXP_POSTFIX "-ixp4xx"
+
+static const struct platform_device_info ixp_dev_info __initdata = {
+ .name = DRIVER_NAME,
+ .id = 0,
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
static int __init ixp_module_init(void)
{
int num = ARRAY_SIZE(ixp4xx_algos);
- int i,err ;
+ int i, err ;
- if (platform_device_register(&pseudo_dev))
- return -ENODEV;
+ pdev = platform_device_register_full(&ixp_dev_info);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ dev = &pdev->dev;
spin_lock_init(&desc_lock);
spin_lock_init(&emerg_lock);
- err = init_ixp_crypto();
+ err = init_ixp_crypto(&pdev->dev);
if (err) {
- platform_device_unregister(&pseudo_dev);
+ platform_device_unregister(pdev);
return err;
}
for (i=0; i< num; i++) {
@@ -1495,8 +1495,8 @@ static void __exit ixp_module_exit(void)
if (ixp4xx_algos[i].registered)
crypto_unregister_alg(&ixp4xx_algos[i].crypto);
}
- release_ixp_crypto();
- platform_device_unregister(&pseudo_dev);
+ release_ixp_crypto(&pdev->dev);
+ platform_device_unregister(pdev);
}
module_init(ixp_module_init);
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index fce46c5bf1c..e51a9832ef0 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -2055,6 +2055,11 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
if (ret)
return ret;
+ /* Ensure that we can do DMA */
+ ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto out_no_pl08x;
+
/* Create the driver state holder */
pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
if (!pl08x) {
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index e35d9759031..453822cc4f9 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -191,11 +191,9 @@ static int dw_probe(struct platform_device *pdev)
if (IS_ERR(chip->regs))
return PTR_ERR(chip->regs);
- /* Apply default dma_mask if needed */
- if (!dev->dma_mask) {
- dev->dma_mask = &dev->coherent_dma_mask;
- dev->coherent_dma_mask = DMA_BIT_MASK(32);
- }
+ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
pdata = dev_get_platdata(dev);
if (!pdata)
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 10b577fcf48..bef8a368c8d 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -634,6 +634,10 @@ static int edma_probe(struct platform_device *pdev)
struct edma_cc *ecc;
int ret;
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
if (!ecc) {
dev_err(&pdev->dev, "Can't allocate controller\n");
@@ -705,11 +709,13 @@ static struct platform_device *pdev0, *pdev1;
static const struct platform_device_info edma_dev_info0 = {
.name = "edma-dma-engine",
.id = 0,
+ .dma_mask = DMA_BIT_MASK(32),
};
static const struct platform_device_info edma_dev_info1 = {
.name = "edma-dma-engine",
.id = 1,
+ .dma_mask = DMA_BIT_MASK(32),
};
static int edma_init(void)
@@ -723,8 +729,6 @@ static int edma_init(void)
ret = PTR_ERR(pdev0);
goto out;
}
- pdev0->dev.dma_mask = &pdev0->dev.coherent_dma_mask;
- pdev0->dev.coherent_dma_mask = DMA_BIT_MASK(32);
}
if (EDMA_CTLRS == 2) {
@@ -734,8 +738,6 @@ static int edma_init(void)
platform_device_unregister(pdev0);
ret = PTR_ERR(pdev1);
}
- pdev1->dev.dma_mask = &pdev1->dev.coherent_dma_mask;
- pdev1->dev.coherent_dma_mask = DMA_BIT_MASK(32);
}
out:
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index fc43603cf0b..c1fd504cae2 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1432,6 +1432,10 @@ static int __init sdma_probe(struct platform_device *pdev)
return -EINVAL;
}
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
if (!sdma)
return -ENOMEM;
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index a562d24d20b..df8b10fd172 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2903,6 +2903,10 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pdat = dev_get_platdata(&adev->dev);
+ ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
/* Allocate a new DMAC and its Channels */
pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL);
if (!pdmac) {
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index ff080ee2019..1b5e8e46226 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -545,12 +545,15 @@ static int dcdbas_probe(struct platform_device *dev)
host_control_action = HC_ACTION_NONE;
host_control_smi_type = HC_SMITYPE_NONE;
+ dcdbas_pdev = dev;
+
/*
* BIOS SMI calls require buffer addresses be in 32-bit address space.
* This is done by setting the DMA mask below.
*/
- dcdbas_pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- dcdbas_pdev->dev.dma_mask = &dcdbas_pdev->dev.coherent_dma_mask;
+ error = dma_set_coherent_mask(&dcdbas_pdev->dev, DMA_BIT_MASK(32));
+ if (error)
+ return error;
error = sysfs_create_group(&dev->dev.kobj, &dcdbas_attr_group);
if (error)
@@ -581,6 +584,14 @@ static struct platform_driver dcdbas_driver = {
.remove = dcdbas_remove,
};
+static const struct platform_device_info dcdbas_dev_info __initdata = {
+ .name = DRIVER_NAME,
+ .id = -1,
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
+static struct platform_device *dcdbas_pdev_reg;
+
/**
* dcdbas_init: initialize driver
*/
@@ -592,20 +603,14 @@ static int __init dcdbas_init(void)
if (error)
return error;
- dcdbas_pdev = platform_device_alloc(DRIVER_NAME, -1);
- if (!dcdbas_pdev) {
- error = -ENOMEM;
+ dcdbas_pdev_reg = platform_device_register_full(&dcdbas_dev_info);
+ if (IS_ERR(dcdbas_pdev_reg)) {
+ error = PTR_ERR(dcdbas_pdev_reg);
goto err_unregister_driver;
}
- error = platform_device_add(dcdbas_pdev);
- if (error)
- goto err_free_device;
-
return 0;
- err_free_device:
- platform_device_put(dcdbas_pdev);
err_unregister_driver:
platform_driver_unregister(&dcdbas_driver);
return error;
@@ -628,8 +633,9 @@ static void __exit dcdbas_exit(void)
* all sysfs attributes belonging to this module have been
* released.
*/
- smi_data_buf_free();
- platform_device_unregister(dcdbas_pdev);
+ if (dcdbas_pdev)
+ smi_data_buf_free();
+ platform_device_unregister(dcdbas_pdev_reg);
platform_driver_unregister(&dcdbas_driver);
}
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index 6eb535ffedd..e5a67b24587 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -764,6 +764,13 @@ static __init int gsmi_system_valid(void)
static struct kobject *gsmi_kobj;
static struct efivars efivars;
+static const struct platform_device_info gsmi_dev_info = {
+ .name = "gsmi",
+ .id = -1,
+ /* SMI callbacks require 32bit addresses */
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
static __init int gsmi_init(void)
{
unsigned long flags;
@@ -776,7 +783,7 @@ static __init int gsmi_init(void)
gsmi_dev.smi_cmd = acpi_gbl_FADT.smi_command;
/* register device */
- gsmi_dev.pdev = platform_device_register_simple("gsmi", -1, NULL, 0);
+ gsmi_dev.pdev = platform_device_register_full(&gsmi_dev_info);
if (IS_ERR(gsmi_dev.pdev)) {
printk(KERN_ERR "gsmi: unable to register platform device\n");
return PTR_ERR(gsmi_dev.pdev);
@@ -785,10 +792,6 @@ static __init int gsmi_init(void)
/* SMI access needs to be serialized */
spin_lock_init(&gsmi_dev.lock);
- /* SMI callbacks require 32bit addresses */
- gsmi_dev.pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- gsmi_dev.pdev->dev.dma_mask =
- &gsmi_dev.pdev->dev.coherent_dma_mask;
ret = -ENOMEM;
gsmi_dev.dma_pool = dma_pool_create("gsmi", &gsmi_dev.pdev->dev,
GSMI_BUF_SIZE, GSMI_BUF_ALIGN, 0);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index bb82ef78ca8..81192d00b39 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -286,7 +286,11 @@ static struct drm_driver exynos_drm_driver = {
static int exynos_drm_platform_probe(struct platform_device *pdev)
{
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ int ret;
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
return drm_platform_init(&exynos_drm_driver, pdev);
}
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index acf667859cb..701c4c10e08 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -664,8 +664,9 @@ static int omap_dmm_probe(struct platform_device *dev)
}
/* set dma mask for device */
- /* NOTE: this is a workaround for the hwmod not initializing properly */
- dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto fail;
omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index df3a0ec7fd2..1c360803966 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -2182,9 +2182,9 @@ static int isp_probe(struct platform_device *pdev)
isp->pdata = pdata;
isp->ref_count = 0;
- isp->raw_dmamask = DMA_BIT_MASK(32);
- isp->dev->dma_mask = &isp->raw_dmamask;
- isp->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(isp->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
platform_set_drvdata(pdev, isp);
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
index cd3eff45ae7..ce65d3ae1aa 100644
--- a/drivers/media/platform/omap3isp/isp.h
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -152,7 +152,6 @@ struct isp_xclk {
* @mmio_base_phys: Array with physical L4 bus addresses for ISP register
* regions.
* @mmio_size: Array with ISP register regions size in bytes.
- * @raw_dmamask: Raw DMA mask
* @stat_lock: Spinlock for handling statistics
* @isp_mutex: Mutex for serializing requests to ISP.
* @crashed: Bitmask of crashed entities (indexed by entity ID)
@@ -190,8 +189,6 @@ struct isp_device {
unsigned long mmio_base_phys[OMAP3_ISP_IOMEM_LAST];
resource_size_t mmio_size[OMAP3_ISP_IOMEM_LAST];
- u64 raw_dmamask;
-
/* ISP Obj */
spinlock_t stat_lock; /* common lock for statistic drivers */
struct mutex isp_mutex; /* For handling ref_count field */
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index fa9632eb63f..357bbc54fe4 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -15,6 +15,7 @@
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -196,7 +197,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
- limit = *mmc_dev(host)->dma_mask;
+ limit = dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
mq->card = card;
mq->queue = blk_init_queue(mmc_request_fn, lock);
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index cdd4ce0d7c9..ef19874fcd1 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -310,8 +310,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
dma_mask = DMA_BIT_MASK(32);
}
- dev->dma_mask = &dev->coherent_dma_mask;
- dev->coherent_dma_mask = dma_mask;
+ err = dma_coerce_mask_and_coherent(dev, dma_mask);
+ if (err)
+ goto err_free;
}
if (c->slot) {
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 079a597fa20..90e54d5488d 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2193,8 +2193,7 @@ static int b44_init_one(struct ssb_device *sdev,
goto err_out_free_dev;
}
- if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) ||
- dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
+ if (dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30))) {
dev_err(sdev->dev,
"Required 30BIT DMA mask unsupported by the system\n");
goto err_out_powerdown;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index bb2f2029150..e622cc1f96f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12140,12 +12140,8 @@ static int bnx2x_set_coherency_mask(struct bnx2x *bp)
{
struct device *dev = &bp->pdev->dev;
- if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
- if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
- dev_err(dev, "dma_set_coherent_mask failed, aborting\n");
- return -EIO;
- }
- } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
+ if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
dev_err(dev, "System does not support DMA, aborting\n");
return -EIO;
}
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index f276433d37c..248bc37cb41 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3299,17 +3299,12 @@ bnad_pci_init(struct bnad *bnad,
err = pci_request_regions(pdev, BNAD_NAME);
if (err)
goto disable_device;
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
- !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
*using_dac = true;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err)
- goto release_regions;
- }
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ goto release_regions;
*using_dac = false;
}
pci_set_master(pdev);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 741d3bff5ae..cb2bb6fccbc 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4487,19 +4487,11 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
adapter->netdev = netdev;
SET_NETDEV_DEV(netdev, &pdev->dev);
- status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (!status) {
- status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (status < 0) {
- dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
- goto free_netdev;
- }
netdev->features |= NETIF_F_HIGHDMA;
} else {
- status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (!status)
- status = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
+ status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (status) {
dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
goto free_netdev;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index ad6800ad1bf..e38622825fa 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1018,19 +1018,14 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
pci_using_dac = 0;
if ((hw->bus_type == e1000_bus_type_pcix) &&
- !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- /* according to DMA-API-HOWTO, coherent calls will always
- * succeed if the set call did
- */
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
pr_err("No usable DMA config, aborting\n");
goto err_dma;
}
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
}
netdev->netdev_ops = &e1000_netdev_ops;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 4ef786775ac..aedd5736a87 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6553,21 +6553,15 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
pci_using_dac = 0;
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (!err)
- pci_using_dac = 1;
+ pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
}
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ebe6370c4b1..2ac14bdd5fb 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2035,21 +2035,15 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
pci_using_dac = 0;
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (!err)
- pci_using_dac = 1;
+ pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
}
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 9fadbb28cf0..04bf22e5ee3 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2637,21 +2637,15 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
pci_using_dac = 0;
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (!err)
- pci_using_dac = 1;
+ pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "No usable DMA "
- "configuration, aborting\n");
- goto err_dma;
- }
+ dev_err(&pdev->dev, "No usable DMA "
+ "configuration, aborting\n");
+ goto err_dma;
}
}
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 9f6b236828e..57e390cbe6d 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -408,20 +408,14 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
pci_using_dac = 0;
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (!err)
- pci_using_dac = 1;
+ pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- pr_err("No usable DMA configuration, aborting\n");
- goto err_dma_mask;
- }
+ pr_err("No usable DMA configuration, aborting\n");
+ goto err_dma_mask;
}
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index bd8f5239dfe..0066f0aefbf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7824,19 +7824,14 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
- !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
pci_using_dac = 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 038bfc8b761..92ef4cb5a8e 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -3421,19 +3421,14 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
- !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "No usable DMA "
- "configuration, aborting\n");
- goto err_dma;
- }
+ dev_err(&pdev->dev, "No usable DMA "
+ "configuration, aborting\n");
+ goto err_dma;
}
pci_using_dac = 0;
}
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index a061b93efe6..ba3ca18611f 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1399,8 +1399,10 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
}
if (pldat->dma_buff_base_v == 0) {
- pldat->pdev->dev.coherent_dma_mask = 0xFFFFFFFF;
- pldat->pdev->dev.dma_mask = &pldat->pdev->dev.coherent_dma_mask;
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto err_out_free_irq;
+
pldat->dma_buff_size = PAGE_ALIGN(pldat->dma_buff_size);
/* Allocate a chunk of memory for the DMA ethernet buffers
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index 1b326cbcd34..7dc3e9b06d7 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -1552,8 +1552,9 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (result)
+ goto err;
netif_carrier_off(netdev);
result = register_netdev(netdev);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 07c9bc4c61b..2e27837ce6a 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1121,7 +1121,7 @@ static int efx_init_io(struct efx_nic *efx)
*/
while (dma_mask > 0x7fffffffUL) {
if (dma_supported(&pci_dev->dev, dma_mask)) {
- rc = dma_set_mask(&pci_dev->dev, dma_mask);
+ rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
if (rc == 0)
break;
}
@@ -1134,16 +1134,6 @@ static int efx_init_io(struct efx_nic *efx)
}
netif_dbg(efx, probe, efx->net_dev,
"using DMA mask %llx\n", (unsigned long long) dma_mask);
- rc = dma_set_coherent_mask(&pci_dev->dev, dma_mask);
- if (rc) {
- /* dma_set_coherent_mask() is not *allowed* to
- * fail with a mask that dma_set_mask() accepted,
- * but just in case...
- */
- netif_err(efx, probe, efx->net_dev,
- "failed to set consistent DMA mask\n");
- goto fail2;
- }
efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index c51d2dc489e..1d7982afc0a 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1065,12 +1065,9 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
/* Try to set the DMA mask. If it fails, try falling back to a
* lower mask, as we can always also support a lower one. */
while (1) {
- err = dma_set_mask(dev->dev->dma_dev, mask);
- if (!err) {
- err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
- if (!err)
- break;
- }
+ err = dma_set_mask_and_coherent(dev->dev->dma_dev, mask);
+ if (!err)
+ break;
if (mask == DMA_BIT_MASK(64)) {
mask = DMA_BIT_MASK(32);
fallback = true;
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index 42eb26c99e1..b2ed1795130 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -806,12 +806,9 @@ static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
/* Try to set the DMA mask. If it fails, try falling back to a
* lower mask, as we can always also support a lower one. */
while (1) {
- err = dma_set_mask(dev->dev->dma_dev, mask);
- if (!err) {
- err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
- if (!err)
- break;
- }
+ err = dma_set_mask_and_coherent(dev->dev->dma_dev, mask);
+ if (!err)
+ break;
if (mask == DMA_BIT_MASK(64)) {
mask = DMA_BIT_MASK(32);
fallback = true;
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index fce088e6f54..404d1daebef 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -282,9 +282,6 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
else
of_device_make_bus_id(&dev->dev);
- /* setup amba-specific device info */
- dev->dma_mask = ~0;
-
/* Allow the HW Peripheral ID to be overridden */
prop = of_get_property(node, "arm,primecell-periphid", NULL);
if (prop)
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 903e1285fda..96376152622 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2004,6 +2004,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
struct resource *ECR_res = NULL;
struct resource *EPP_res = NULL;
struct platform_device *pdev = NULL;
+ int ret;
if (!dev) {
/* We need a physical device to attach to, but none was
@@ -2014,8 +2015,11 @@ struct parport *parport_pc_probe_port(unsigned long int base,
return NULL;
dev = &pdev->dev;
- dev->coherent_dma_mask = DMA_BIT_MASK(24);
- dev->dma_mask = &dev->coherent_dma_mask;
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(24));
+ if (ret) {
+ dev_err(dev, "Unable to set coherent dma mask: disabling DMA\n");
+ dma = PARPORT_DMA_NONE;
+ }
}
ops = kmalloc(sizeof(struct parport_operations), GFP_KERNEL);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d1549b74e2d..7bd7f0d5f05 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1684,7 +1684,7 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
host_dev = scsi_get_device(shost);
if (host_dev && host_dev->dma_mask)
- bounce_limit = *host_dev->dma_mask;
+ bounce_limit = dma_max_pfn(host_dev) << PAGE_SHIFT;
return bounce_limit;
}
diff --git a/drivers/staging/dwc2/platform.c b/drivers/staging/dwc2/platform.c
index 76ae6e210f5..83ca1053bb1 100644
--- a/drivers/staging/dwc2/platform.c
+++ b/drivers/staging/dwc2/platform.c
@@ -100,8 +100,9 @@ static int dwc2_driver_probe(struct platform_device *dev)
*/
if (!dev->dev.dma_mask)
dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
- if (!dev->dev.coherent_dma_mask)
- dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ retval = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
+ if (retval)
+ return retval;
irq = platform_get_irq(dev, 0);
if (irq < 0) {
diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c
index d9446c47bf2..820a332f318 100644
--- a/drivers/staging/et131x/et131x.c
+++ b/drivers/staging/et131x/et131x.c
@@ -4791,21 +4791,8 @@ static int et131x_pci_setup(struct pci_dev *pdev,
pci_set_master(pdev);
/* Check the DMA addressing support of this device */
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (rc < 0) {
- dev_err(&pdev->dev,
- "Unable to obtain 64 bit DMA for consistent allocations\n");
- goto err_release_res;
- }
- } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (rc < 0) {
- dev_err(&pdev->dev,
- "Unable to obtain 32 bit DMA for consistent allocations\n");
- goto err_release_res;
- }
- } else {
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
dev_err(&pdev->dev, "No usable DMA addressing method\n");
rc = -EIO;
goto err_release_res;
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 4483d47f739..3d3a824f6de 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -815,6 +815,12 @@ static struct drm_driver imx_drm_driver = {
static int imx_drm_platform_probe(struct platform_device *pdev)
{
+ int ret;
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
imx_drm_device->dev = &pdev->dev;
return drm_platform_init(&imx_drm_driver, pdev);
@@ -857,8 +863,6 @@ static int __init imx_drm_init(void)
goto err_pdev;
}
- imx_drm_pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32),
-
ret = platform_driver_register(&imx_drm_pdrv);
if (ret)
goto err_pdrv;
diff --git a/drivers/staging/imx-drm/ipuv3-crtc.c b/drivers/staging/imx-drm/ipuv3-crtc.c
index 670a56a834f..ce6ba987ec9 100644
--- a/drivers/staging/imx-drm/ipuv3-crtc.c
+++ b/drivers/staging/imx-drm/ipuv3-crtc.c
@@ -407,7 +407,9 @@ static int ipu_drm_probe(struct platform_device *pdev)
if (!pdata)
return -EINVAL;
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
ipu_crtc = devm_kzalloc(&pdev->dev, sizeof(*ipu_crtc), GFP_KERNEL);
if (!ipu_crtc)
diff --git a/drivers/staging/media/dt3155v4l/dt3155v4l.c b/drivers/staging/media/dt3155v4l/dt3155v4l.c
index 90d6ac46935..081407be33a 100644
--- a/drivers/staging/media/dt3155v4l/dt3155v4l.c
+++ b/drivers/staging/media/dt3155v4l/dt3155v4l.c
@@ -901,10 +901,7 @@ dt3155_probe(struct pci_dev *pdev, const struct pci_device_id *id)
int err;
struct dt3155_priv *pd;
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err)
- return -ENODEV;
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err)
return -ENODEV;
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 023d3cb6aa0..bb5d976e5b8 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -115,10 +115,9 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
pdata.phy = data->phy;
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto err_clk;
if (data->usbmisc_data) {
ret = imx_usbmisc_init(data->usbmisc_data);
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index 2f2e88a3a11..8b20c70d91e 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -119,10 +119,9 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we move to full device tree support this will vanish off.
*/
- if (!dev->dma_mask)
- dev->dma_mask = &dev->coherent_dma_mask;
- if (!dev->coherent_dma_mask)
- dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto err1;
platform_set_drvdata(pdev, exynos);
diff --git a/drivers/usb/gadget/lpc32xx_udc.c b/drivers/usb/gadget/lpc32xx_udc.c
index 67128be1e1b..6a2a65aa005 100644
--- a/drivers/usb/gadget/lpc32xx_udc.c
+++ b/drivers/usb/gadget/lpc32xx_udc.c
@@ -3078,7 +3078,9 @@ static int __init lpc32xx_udc_probe(struct platform_device *pdev)
udc->isp1301_i2c_client->addr);
pdev->dev.dma_mask = &lpc32xx_usbd_dmamask;
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ retval = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval)
+ goto resource_fail;
udc->board = &lpc32xx_usbddata;
diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c
index df13d425e9c..205f4a33658 100644
--- a/drivers/usb/host/bcma-hcd.c
+++ b/drivers/usb/host/bcma-hcd.c
@@ -227,8 +227,7 @@ static int bcma_hcd_probe(struct bcma_device *dev)
/* TODO: Probably need checks here; is the core connected? */
- if (dma_set_mask(dev->dma_dev, DMA_BIT_MASK(32)) ||
- dma_set_coherent_mask(dev->dma_dev, DMA_BIT_MASK(32)))
+ if (dma_set_mask_and_coherent(dev->dma_dev, DMA_BIT_MASK(32)))
return -EOPNOTSUPP;
usb_dev = kzalloc(sizeof(struct bcma_hcd_device), GFP_KERNEL);
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index f417526fb1f..284f8417eae 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -96,10 +96,9 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval)
+ goto fail_create_hcd;
hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index 016352e0f5a..e97c198e052 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -84,10 +84,9 @@ static int exynos_ehci_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we move to full device tree support this will vanish off.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
exynos_setup_vbus_gpio(pdev);
diff --git a/drivers/usb/host/ehci-octeon.c b/drivers/usb/host/ehci-octeon.c
index ab0397e4d8f..4c528b2c033 100644
--- a/drivers/usb/host/ehci-octeon.c
+++ b/drivers/usb/host/ehci-octeon.c
@@ -116,8 +116,10 @@ static int ehci_octeon_drv_probe(struct platform_device *pdev)
* We can DMA from anywhere. But the descriptors must be in
* the lower 4GB.
*/
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
pdev->dev.dma_mask = &ehci_octeon_dma_mask;
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
hcd = usb_create_hcd(&ehci_octeon_hc_driver, &pdev->dev, "octeon");
if (!hcd)
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 78b01fa475b..6fa82d6b766 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -104,7 +104,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
struct resource *res;
struct usb_hcd *hcd;
void __iomem *regs;
- int ret = -ENODEV;
+ int ret;
int irq;
int i;
struct omap_hcd *omap;
@@ -144,11 +144,11 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!dev->dma_mask)
- dev->dma_mask = &dev->coherent_dma_mask;
- if (!dev->coherent_dma_mask)
- dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+ ret = -ENODEV;
hcd = usb_create_hcd(&ehci_omap_hc_driver, dev,
dev_name(dev));
if (!hcd) {
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index d1dfb9db5b4..2ba76730e65 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -180,10 +180,9 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
* set. Since shared usb code relies on it, set it here for
* now. Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ goto err1;
if (!request_mem_region(res->start, resource_size(res),
ehci_orion_hc_driver.description)) {
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index f6b790ca8cf..7f30b7168d5 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -78,7 +78,7 @@ static int ehci_platform_probe(struct platform_device *dev)
struct resource *res_mem;
struct usb_ehci_pdata *pdata;
int irq;
- int err = -ENOMEM;
+ int err;
if (usb_disabled())
return -ENODEV;
@@ -89,10 +89,10 @@ static int ehci_platform_probe(struct platform_device *dev)
*/
if (!dev_get_platdata(&dev->dev))
dev->dev.platform_data = &ehci_platform_defaults;
- if (!dev->dev.dma_mask)
- dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
- if (!dev->dev.coherent_dma_mask)
- dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+
+ err = dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
pdata = dev_get_platdata(&dev->dev);
diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c
index 1cf0adba3fc..ee6f9ffaa0e 100644
--- a/drivers/usb/host/ehci-spear.c
+++ b/drivers/usb/host/ehci-spear.c
@@ -81,10 +81,9 @@ static int spear_ehci_hcd_drv_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval)
+ goto fail;
usbh_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(usbh_clk)) {
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index e6d8e26e48c..b9fd0396011 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -362,10 +362,9 @@ static int tegra_ehci_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
hcd = usb_create_hcd(&tegra_ehci_hc_driver, &pdev->dev,
dev_name(&pdev->dev));
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 476b5a5baf2..418444ebb1b 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -469,7 +469,7 @@ MODULE_DEVICE_TABLE(of, at91_ohci_dt_ids);
static int ohci_at91_of_init(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- int i, gpio;
+ int i, gpio, ret;
enum of_gpio_flags flags;
struct at91_usbh_data *pdata;
u32 ports;
@@ -481,10 +481,9 @@ static int ohci_at91_of_init(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index a87baedc0aa..91ec9b2cd37 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -71,10 +71,9 @@ static int exynos_ohci_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we move to full device tree support this will vanish off.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
hcd = usb_create_hcd(&exynos_ohci_hc_driver,
&pdev->dev, dev_name(&pdev->dev));
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index 9ab7e24ba65..e99db8a6d55 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -181,8 +181,9 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto fail_disable;
dev_dbg(&pdev->dev, "%s: " DRIVER_DESC " (nxp)\n", hcd_name);
if (usb_disabled()) {
diff --git a/drivers/usb/host/ohci-octeon.c b/drivers/usb/host/ohci-octeon.c
index 342dc7e543b..6c16dcef15c 100644
--- a/drivers/usb/host/ohci-octeon.c
+++ b/drivers/usb/host/ohci-octeon.c
@@ -127,8 +127,9 @@ static int ohci_octeon_drv_probe(struct platform_device *pdev)
}
/* Ohci is a 32-bit device. */
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
hcd = usb_create_hcd(&ohci_octeon_hc_driver, &pdev->dev, "octeon");
if (!hcd)
diff --git a/drivers/usb/host/ohci-omap3.c b/drivers/usb/host/ohci-omap3.c
index 408d06a6857..21457417a85 100644
--- a/drivers/usb/host/ohci-omap3.c
+++ b/drivers/usb/host/ohci-omap3.c
@@ -65,7 +65,7 @@ static int ohci_hcd_omap3_probe(struct platform_device *pdev)
struct usb_hcd *hcd = NULL;
void __iomem *regs = NULL;
struct resource *res;
- int ret = -ENODEV;
+ int ret;
int irq;
if (usb_disabled())
@@ -99,11 +99,11 @@ static int ohci_hcd_omap3_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!dev->dma_mask)
- dev->dma_mask = &dev->coherent_dma_mask;
- if (!dev->coherent_dma_mask)
- dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto err_io;
+ ret = -ENODEV;
hcd = usb_create_hcd(&ohci_omap3_hc_driver, dev,
dev_name(dev));
if (!hcd) {
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index deea5d1d639..e89ac4d4b87 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -298,6 +298,7 @@ static int ohci_pxa_of_init(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct pxaohci_platform_data *pdata;
u32 tmp;
+ int ret;
if (!np)
return 0;
@@ -306,10 +307,9 @@ static int ohci_pxa_of_init(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index 17b2a7dad77..aa9e127bbe7 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -185,6 +185,12 @@ static int ohci_hcd_sa1111_probe(struct sa1111_dev *dev)
if (usb_disabled())
return -ENODEV;
+ /*
+ * We don't call dma_set_mask_and_coherent() here because the
+ * DMA mask has already been appropriately set up by the core
+ * SA-1111 bus code (which includes bug workarounds).
+ */
+
hcd = usb_create_hcd(&ohci_sa1111_hc_driver, &dev->dev, "sa1111");
if (!hcd)
return -ENOMEM;
diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c
index 31ff3fc4e26..6b02107d281 100644
--- a/drivers/usb/host/ohci-spear.c
+++ b/drivers/usb/host/ohci-spear.c
@@ -56,10 +56,9 @@ static int spear_ohci_hcd_drv_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval)
+ goto fail;
usbh_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(usbh_clk)) {
diff --git a/drivers/usb/host/ssb-hcd.c b/drivers/usb/host/ssb-hcd.c
index 74af2c6287d..0196f766df7 100644
--- a/drivers/usb/host/ssb-hcd.c
+++ b/drivers/usb/host/ssb-hcd.c
@@ -163,8 +163,7 @@ static int ssb_hcd_probe(struct ssb_device *dev,
/* TODO: Probably need checks here; is the core connected? */
- if (dma_set_mask(dev->dma_dev, DMA_BIT_MASK(32)) ||
- dma_set_coherent_mask(dev->dma_dev, DMA_BIT_MASK(32)))
+ if (dma_set_mask_and_coherent(dev->dma_dev, DMA_BIT_MASK(32)))
return -EOPNOTSUPP;
usb_dev = kzalloc(sizeof(struct ssb_hcd_device), GFP_KERNEL);
diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
index ded842bc657..3003fefaa96 100644
--- a/drivers/usb/host/uhci-platform.c
+++ b/drivers/usb/host/uhci-platform.c
@@ -75,10 +75,9 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
hcd = usb_create_hcd(&uhci_platform_hc_driver, &pdev->dev,
pdev->name);
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index 0a2cce7285b..afe4702a552 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -10,6 +10,7 @@
*
* ARM PrimeCell PL110 Color LCD Controller
*/
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -551,6 +552,10 @@ static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id)
if (!board)
return -EINVAL;
+ ret = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto out;
+
ret = amba_request_regions(dev, NULL);
if (ret) {
printk(KERN_ERR "CLCD: unable to reserve regs region\n");
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 43ec7e247a8..682df0e1954 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -30,7 +30,6 @@ struct amba_device {
struct device dev;
struct resource res;
struct clk *pclk;
- u64 dma_mask;
unsigned int periphid;
unsigned int irq[AMBA_NR_IRQS];
};
@@ -131,7 +130,6 @@ struct amba_device name##_device = { \
struct amba_device name##_device = { \
.dev = __AMBA_DEV(busid, data, ~0ULL), \
.res = DEFINE_RES_MEM(base, SZ_4K), \
- .dma_mask = ~0ULL, \
.irq = irqs, \
.periphid = id, \
}
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 3a8d0a2af60..fd4aee29ad1 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -97,6 +97,30 @@ static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
}
#endif
+/*
+ * Set both the DMA mask and the coherent DMA mask to the same thing.
+ * Note that we don't check the return value from dma_set_coherent_mask()
+ * as the DMA API guarantees that the coherent DMA mask can be set to
+ * the same or smaller than the streaming DMA mask.
+ */
+static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
+{
+ int rc = dma_set_mask(dev, mask);
+ if (rc == 0)
+ dma_set_coherent_mask(dev, mask);
+ return rc;
+}
+
+/*
+ * Similar to the above, except it deals with the case where the device
+ * does not have dev->dma_mask appropriately setup.
+ */
+static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
+{
+ dev->dma_mask = &dev->coherent_dma_mask;
+ return dma_set_mask_and_coherent(dev, mask);
+}
+
extern u64 dma_get_required_mask(struct device *dev);
static inline unsigned int dma_get_max_seg_size(struct device *dev)
@@ -129,6 +153,13 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
return -EIO;
}
+#ifndef dma_max_pfn
+static inline unsigned long dma_max_pfn(struct device *dev)
+{
+ return *dev->dma_mask >> PAGE_SHIFT;
+}
+#endif
+
static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
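[Editor's note: the snippet below is a minimal illustrative sketch of the conversion pattern applied by the hunks above, not part of the merged diff. foo_probe() and its driver are hypothetical; only dma_set_mask_and_coherent(), dma_coerce_mask_and_coherent() and DMA_BIT_MASK() come from the dma-mapping.h changes shown above.]

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

/*
 * Hypothetical probe routine showing the conversion pattern used by the
 * driver hunks in this series.  foo_probe() does not exist in the tree;
 * only the dma-mapping helpers are real.
 */
static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/*
	 * Legacy platform devices are often created without dev->dma_mask
	 * initialised, so the coercing variant first points dma_mask at
	 * coherent_dma_mask and then sets both masks, replacing the
	 * open-coded "if (!pdev->dev.dma_mask) ..." sequences removed above.
	 *
	 * Drivers whose bus code already provides dev->dma_mask should call
	 * dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) instead.
	 */
	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/* ... normal resource setup continues here ... */
	return 0;
}

The dma_max_pfn() fallback added in the same hunk simply converts *dev->dma_mask into a page frame number; the #ifndef guard lets an architecture supply its own definition.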
diff --git a/sound/arm/pxa2xx-pcm.c b/sound/arm/pxa2xx-pcm.c
index 69a2455b447..e6c727b317f 100644
--- a/sound/arm/pxa2xx-pcm.c
+++ b/sound/arm/pxa2xx-pcm.c
@@ -11,6 +11,7 @@
*/
#include <linux/module.h>
+#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <sound/core.h>
@@ -83,8 +84,6 @@ static struct snd_pcm_ops pxa2xx_pcm_ops = {
.mmap = pxa2xx_pcm_mmap,
};
-static u64 pxa2xx_pcm_dmamask = 0xffffffff;
-
int pxa2xx_pcm_new(struct snd_card *card, struct pxa2xx_pcm_client *client,
struct snd_pcm **rpcm)
{
@@ -100,10 +99,9 @@ int pxa2xx_pcm_new(struct snd_card *card, struct pxa2xx_pcm_client *client,
pcm->private_data = client;
pcm->private_free = pxa2xx_pcm_free_dma_buffers;
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &pxa2xx_pcm_dmamask;
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = 0xffffffff;
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto out;
if (play) {
int stream = SNDRV_PCM_STREAM_PLAYBACK;
diff --git a/sound/soc/atmel/atmel-pcm.c b/sound/soc/atmel/atmel-pcm.c
index 612e5801003..8ae3fa5ac60 100644
--- a/sound/soc/atmel/atmel-pcm.c
+++ b/sound/soc/atmel/atmel-pcm.c
@@ -68,18 +68,15 @@ int atmel_pcm_mmap(struct snd_pcm_substream *substream,
}
EXPORT_SYMBOL_GPL(atmel_pcm_mmap);
-static u64 atmel_pcm_dmamask = DMA_BIT_MASK(32);
-
int atmel_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
- int ret = 0;
+ int ret;
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &atmel_pcm_dmamask;
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
pr_debug("atmel-pcm: allocating PCM playback DMA buffer\n");
diff --git a/sound/soc/blackfin/bf5xx-ac97-pcm.c b/sound/soc/blackfin/bf5xx-ac97-pcm.c
index 53f84085bf1..1d4c676eb6c 100644
--- a/sound/soc/blackfin/bf5xx-ac97-pcm.c
+++ b/sound/soc/blackfin/bf5xx-ac97-pcm.c
@@ -415,19 +415,16 @@ static void bf5xx_pcm_free_dma_buffers(struct snd_pcm *pcm)
}
}
-static u64 bf5xx_pcm_dmamask = DMA_BIT_MASK(32);
-
static int bf5xx_pcm_ac97_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
- int ret = 0;
+ int ret;
pr_debug("%s enter\n", __func__);
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &bf5xx_pcm_dmamask;
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
ret = bf5xx_pcm_preallocate_dma_buffer(pcm,
diff --git a/sound/soc/blackfin/bf5xx-i2s-pcm.c b/sound/soc/blackfin/bf5xx-i2s-pcm.c
index 9cb4a80df98..2a5b43417fd 100644
--- a/sound/soc/blackfin/bf5xx-i2s-pcm.c
+++ b/sound/soc/blackfin/bf5xx-i2s-pcm.c
@@ -323,18 +323,16 @@ static struct snd_pcm_ops bf5xx_pcm_i2s_ops = {
.silence = bf5xx_pcm_silence,
};
-static u64 bf5xx_pcm_dmamask = DMA_BIT_MASK(32);
-
static int bf5xx_pcm_i2s_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
size_t size = bf5xx_pcm_hardware.buffer_bytes_max;
+ int ret;
pr_debug("%s enter\n", __func__);
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &bf5xx_pcm_dmamask;
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
return snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
SNDRV_DMA_TYPE_DEV, card->dev, size, size);
diff --git a/sound/soc/davinci/davinci-pcm.c b/sound/soc/davinci/davinci-pcm.c
index 443e9e599a7..fa64cd85204 100644
--- a/sound/soc/davinci/davinci-pcm.c
+++ b/sound/soc/davinci/davinci-pcm.c
@@ -843,18 +843,15 @@ static void davinci_pcm_free(struct snd_pcm *pcm)
}
}
-static u64 davinci_pcm_dmamask = DMA_BIT_MASK(32);
-
static int davinci_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
int ret;
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &davinci_pcm_dmamask;
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
ret = davinci_pcm_preallocate_dma_buffer(pcm,
diff --git a/sound/soc/fsl/fsl_dma.c b/sound/soc/fsl/fsl_dma.c
index d1b111e7fc0..fb9bb9eb5ca 100644
--- a/sound/soc/fsl/fsl_dma.c
+++ b/sound/soc/fsl/fsl_dma.c
@@ -300,14 +300,11 @@ static int fsl_dma_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
- static u64 fsl_dma_dmamask = DMA_BIT_MASK(36);
int ret;
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &fsl_dma_dmamask;
-
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = fsl_dma_dmamask;
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(36));
+ if (ret)
+ return ret;
/* Some codecs have separate DAIs for playback and capture, so we
* should allocate a DMA buffer only for the streams that are valid.
diff --git a/sound/soc/fsl/imx-pcm-fiq.c b/sound/soc/fsl/imx-pcm-fiq.c
index 10e330514ed..41740e48882 100644
--- a/sound/soc/fsl/imx-pcm-fiq.c
+++ b/sound/soc/fsl/imx-pcm-fiq.c
@@ -254,18 +254,16 @@ static int imx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
return 0;
}
-static u64 imx_pcm_dmamask = DMA_BIT_MASK(32);
-
static int imx_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
- int ret = 0;
+ int ret;
+
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &imx_pcm_dmamask;
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
ret = imx_pcm_preallocate_dma_buffer(pcm,
SNDRV_PCM_STREAM_PLAYBACK);
diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c
index 161e5055ce9..71bf2f248cd 100644
--- a/sound/soc/fsl/mpc5200_dma.c
+++ b/sound/soc/fsl/mpc5200_dma.c
@@ -301,7 +301,6 @@ static struct snd_pcm_ops psc_dma_ops = {
.hw_params = psc_dma_hw_params,
};
-static u64 psc_dma_dmamask = DMA_BIT_MASK(32);
static int psc_dma_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
@@ -309,15 +308,14 @@ static int psc_dma_new(struct snd_soc_pcm_runtime *rtd)
struct snd_pcm *pcm = rtd->pcm;
struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai);
size_t size = psc_dma_hardware.buffer_bytes_max;
- int rc = 0;
+ int rc;
dev_dbg(rtd->platform->dev, "psc_dma_new(card=%p, dai=%p, pcm=%p)\n",
card, dai, pcm);
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &psc_dma_dmamask;
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ rc = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (rc)
+ return rc;
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
rc = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev,
diff --git a/sound/soc/jz4740/jz4740-pcm.c b/sound/soc/jz4740/jz4740-pcm.c
index 71005929231..1d7ef28585e 100644
--- a/sound/soc/jz4740/jz4740-pcm.c
+++ b/sound/soc/jz4740/jz4740-pcm.c
@@ -297,19 +297,15 @@ static void jz4740_pcm_free(struct snd_pcm *pcm)
}
}
-static u64 jz4740_pcm_dmamask = DMA_BIT_MASK(32);
-
static int jz4740_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
- int ret = 0;
-
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &jz4740_pcm_dmamask;
+ int ret;
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
ret = jz4740_pcm_preallocate_dma_buffer(pcm,
diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
index 55d0d9d3a9f..4af1936cf0f 100644
--- a/sound/soc/kirkwood/kirkwood-dma.c
+++ b/sound/soc/kirkwood/kirkwood-dma.c
@@ -57,8 +57,6 @@ static struct snd_pcm_hardware kirkwood_dma_snd_hw = {
.fifo_size = 0,
};
-static u64 kirkwood_dma_dmamask = DMA_BIT_MASK(32);
-
static irqreturn_t kirkwood_dma_irq(int irq, void *dev_id)
{
struct kirkwood_dma_data *priv = dev_id;
@@ -290,10 +288,9 @@ static int kirkwood_dma_new(struct snd_soc_pcm_runtime *rtd)
struct snd_pcm *pcm = rtd->pcm;
int ret;
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &kirkwood_dma_dmamask;
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
ret = kirkwood_dma_preallocate_dma_buffer(pcm,
diff --git a/sound/soc/nuc900/nuc900-pcm.c b/sound/soc/nuc900/nuc900-pcm.c
index c894ff0f258..f588ee45b4f 100644
--- a/sound/soc/nuc900/nuc900-pcm.c
+++ b/sound/soc/nuc900/nuc900-pcm.c
@@ -314,16 +314,15 @@ static void nuc900_dma_free_dma_buffers(struct snd_pcm *pcm)
snd_pcm_lib_preallocate_free_for_all(pcm);
}
-static u64 nuc900_pcm_dmamask = DMA_BIT_MASK(32);
static int nuc900_dma_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
+ int ret;
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &nuc900_pcm_dmamask;
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
card->dev, 4 * 1024, (4 * 1024) - 1);
diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
index a11405de86e..b8fa9862e54 100644
--- a/sound/soc/omap/omap-pcm.c
+++ b/sound/soc/omap/omap-pcm.c
@@ -156,8 +156,6 @@ static struct snd_pcm_ops omap_pcm_ops = {
.mmap = omap_pcm_mmap,
};
-static u64 omap_pcm_dmamask = DMA_BIT_MASK(64);
-
static int omap_pcm_preallocate_dma_buffer(struct snd_pcm *pcm,
int stream)
{
@@ -202,12 +200,11 @@ static int omap_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
- int ret = 0;
+ int ret;
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &omap_pcm_dmamask;
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = DMA_BIT_MASK(64);
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
ret = omap_pcm_preallocate_dma_buffer(pcm,
diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c
index 806da27b8b6..d58b09f4f7a 100644
--- a/sound/soc/pxa/pxa2xx-pcm.c
+++ b/sound/soc/pxa/pxa2xx-pcm.c
@@ -87,18 +87,15 @@ static struct snd_pcm_ops pxa2xx_pcm_ops = {
.mmap = pxa2xx_pcm_mmap,
};
-static u64 pxa2xx_pcm_dmamask = DMA_BIT_MASK(32);
-
static int pxa2xx_soc_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
- int ret = 0;
+ int ret;
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &pxa2xx_pcm_dmamask;
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
ret = pxa2xx_pcm_preallocate_dma_buffer(pcm,
diff --git a/sound/soc/s6000/s6000-pcm.c b/sound/soc/s6000/s6000-pcm.c
index 5cfaa5464eb..d219880815c 100644
--- a/sound/soc/s6000/s6000-pcm.c
+++ b/sound/soc/s6000/s6000-pcm.c
@@ -445,8 +445,6 @@ static void s6000_pcm_free(struct snd_pcm *pcm)
snd_pcm_lib_preallocate_free_for_all(pcm);
}
-static u64 s6000_pcm_dmamask = DMA_BIT_MASK(32);
-
static int s6000_pcm_new(struct snd_soc_pcm_runtime *runtime)
{
struct snd_card *card = runtime->card->snd_card;
@@ -457,10 +455,9 @@ static int s6000_pcm_new(struct snd_soc_pcm_runtime *runtime)
params = snd_soc_dai_get_dma_data(runtime->cpu_dai,
pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream);
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &s6000_pcm_dmamask;
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ res = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (res)
+ return res;
if (params->dma_in) {
s6dmac_disable_chan(DMA_MASK_DMAC(params->dma_in),
diff --git a/sound/soc/samsung/dma.c b/sound/soc/samsung/dma.c
index 9338d11e921..fe2748b494d 100644
--- a/sound/soc/samsung/dma.c
+++ b/sound/soc/samsung/dma.c
@@ -406,20 +406,17 @@ static void dma_free_dma_buffers(struct snd_pcm *pcm)
}
}
-static u64 dma_mask = DMA_BIT_MASK(32);
-
static int dma_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
- int ret = 0;
+ int ret;
pr_debug("Entered %s\n", __func__);
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &dma_mask;
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
ret = preallocate_dma_buffer(pcm,
diff --git a/sound/soc/samsung/idma.c b/sound/soc/samsung/idma.c
index ce1e1e16f25..e4f318fc2f8 100644
--- a/sound/soc/samsung/idma.c
+++ b/sound/soc/samsung/idma.c
@@ -383,18 +383,15 @@ static int preallocate_idma_buffer(struct snd_pcm *pcm, int stream)
return 0;
}
-static u64 idma_mask = DMA_BIT_MASK(32);
-
static int idma_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
- int ret = 0;
+ int ret;
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &idma_mask;
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
ret = preallocate_idma_buffer(pcm,