Diffstat (limited to 'arch/arm/plat-omap/dma.c')
-rw-r--r-- | arch/arm/plat-omap/dma.c | 67
1 files changed, 47 insertions, 20 deletions
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index 09d82b3c66c..f7f571e7987 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -29,6 +29,7 @@
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/io.h>
+#include <linux/slab.h>
 
 #include <asm/system.h>
 #include <mach/hardware.h>
@@ -500,7 +501,8 @@ void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
 			burst = 0x2;
 			break;
 		}
-		/* not supported by current hardware on OMAP1
+		/*
+		 * not supported by current hardware on OMAP1
 		 * w |= (0x03 << 7);
 		 * fall through
 		 */
@@ -509,7 +511,8 @@ void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
 			burst = 0x3;
 			break;
 		}
-		/* OMAP1 don't support burst 16
+		/*
+		 * OMAP1 don't support burst 16
 		 * fall through
 		 */
 	default:
@@ -603,7 +606,8 @@ void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
 			burst = 0x3;
 			break;
 		}
-		/* OMAP1 don't support burst 16
+		/*
+		 * OMAP1 don't support burst 16
 		 * fall through
 		 */
 	default:
@@ -708,6 +712,21 @@ static inline void omap2_enable_irq_lch(int lch)
 	spin_unlock_irqrestore(&dma_chan_lock, flags);
 }
 
+static inline void omap2_disable_irq_lch(int lch)
+{
+	u32 val;
+	unsigned long flags;
+
+	if (!cpu_class_is_omap2())
+		return;
+
+	spin_lock_irqsave(&dma_chan_lock, flags);
+	val = dma_read(IRQENABLE_L0);
+	val &= ~(1 << lch);
+	dma_write(val, IRQENABLE_L0);
+	spin_unlock_irqrestore(&dma_chan_lock, flags);
+}
+
 int omap_request_dma(int dev_id, const char *dev_name,
 		     void (*callback)(int lch, u16 ch_status, void *data),
 		     void *data, int *dma_ch_out)
@@ -806,14 +825,7 @@ void omap_free_dma(int lch)
 	}
 
 	if (cpu_class_is_omap2()) {
-		u32 val;
-
-		spin_lock_irqsave(&dma_chan_lock, flags);
-		/* Disable interrupts */
-		val = dma_read(IRQENABLE_L0);
-		val &= ~(1 << lch);
-		dma_write(val, IRQENABLE_L0);
-		spin_unlock_irqrestore(&dma_chan_lock, flags);
+		omap2_disable_irq_lch(lch);
 
 		/* Clear the CSR register and IRQ status register */
 		dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(lch));
@@ -936,6 +948,15 @@ void omap_start_dma(int lch)
 {
 	u32 l;
 
+	/*
+	 * The CPC/CDAC register needs to be initialized to zero
+	 * before starting dma transfer.
+	 */
+	if (cpu_is_omap15xx())
+		dma_write(0, CPC(lch));
+	else
+		dma_write(0, CDAC(lch));
+
 	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
 		int next_lch, cur_lch;
 		char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT];
@@ -1183,7 +1204,7 @@ void omap_dma_unlink_lch(int lch_head, int lch_queue)
 	}
 
 	if ((dma_chan[lch_head].flags & OMAP_DMA_ACTIVE) ||
-			(dma_chan[lch_head].flags & OMAP_DMA_ACTIVE)) {
+			(dma_chan[lch_queue].flags & OMAP_DMA_ACTIVE)) {
 		printk(KERN_ERR "omap_dma: You need to stop the DMA channels "
 		       "before unlinking\n");
 		dump_stack();
@@ -1267,8 +1288,10 @@ int omap_request_dma_chain(int dev_id, const char *dev_name,
 		return -EINVAL;
 	}
 
-	/* Allocate a queue to maintain the status of the channels
-	 * in the chain */
+	/*
+	 * Allocate a queue to maintain the status of the channels
+	 * in the chain
+	 */
 	channels = kmalloc(sizeof(*channels) * no_of_chans, GFP_KERNEL);
 	if (channels == NULL) {
 		printk(KERN_ERR "omap_dma: No memory for channel queue\n");
@@ -1870,8 +1893,7 @@ static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id)
 #define omap1_dma_irq_handler	NULL
 #endif
 
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3) || \
-			defined(CONFIG_ARCH_OMAP4)
+#ifdef CONFIG_ARCH_OMAP2PLUS
 
 static int omap2_dma_handle_ch(int ch)
 {
@@ -1898,7 +1920,8 @@ static int omap2_dma_handle_ch(int ch)
 		printk(KERN_INFO "DMA transaction error with device %d\n",
 		       dma_chan[ch].dev_id);
 		if (cpu_class_is_omap2()) {
-			/* Errata: sDMA Channel is not disabled
+			/*
+			 * Errata: sDMA Channel is not disabled
 			 * after a transaction error. So we explicitely
 			 * disable the channel
 			 */
@@ -2098,6 +2121,9 @@ static int __init omap_init_dma(void)
 
 	for (ch = 0; ch < dma_chan_count; ch++) {
 		omap_clear_dma(ch);
+		if (cpu_class_is_omap2())
+			omap2_disable_irq_lch(ch);
+
 		dma_chan[ch].dev_id = -1;
 		dma_chan[ch].next_lch = -1;
 
@@ -2133,13 +2159,13 @@ static int __init omap_init_dma(void)
 	if (cpu_class_is_omap2()) {
 		int irq;
 		if (cpu_is_omap44xx())
-			irq = INT_44XX_SDMA_IRQ0;
+			irq = OMAP44XX_IRQ_SDMA_0;
 		else
 			irq = INT_24XX_SDMA_IRQ0;
 		setup_irq(irq, &omap24xx_dma_irq);
 	}
 
-	if (cpu_is_omap34xx()) {
+	if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
 		/* Enable smartidle idlemodes and autoidle */
 		u32 v = dma_read(OCP_SYSCONFIG);
 		v &= ~(DMA_SYSCONFIG_MIDLEMODE_MASK |
@@ -2150,7 +2176,8 @@ static int __init omap_init_dma(void)
 			DMA_SYSCONFIG_AUTOIDLE);
 		dma_write(v , OCP_SYSCONFIG);
 		/* reserve dma channels 0 and 1 in high security devices */
-		if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
+		if (cpu_is_omap34xx() &&
+			(omap_type() != OMAP2_DEVICE_TYPE_GP)) {
 			printk(KERN_INFO "Reserving DMA channels 0 and 1 for "
 					 "HS ROM code\n");
 			dma_chan[0].dev_id = 0;
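
For reference, a minimal usage sketch (not part of the patch) of how a driver might exercise the paths touched above. The omap_request_dma(), omap_start_dma() and omap_free_dma() signatures are taken from the hunks in this diff; OMAP_DMA_NO_DEVICE, the callback and all "example_*" names are illustrative assumptions rather than code from this commit.

/*
 * Hypothetical caller of the plat-omap DMA API changed by this patch.
 * OMAP_DMA_NO_DEVICE and the example_* names are assumptions for
 * illustration only.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <plat/dma.h>

static void example_dma_cb(int lch, u16 ch_status, void *data)
{
	/* completion/error status for logical channel lch would be handled here */
}

static int example_run_dma(void)
{
	int lch, ret;

	ret = omap_request_dma(OMAP_DMA_NO_DEVICE, "example",
			       example_dma_cb, NULL, &lch);
	if (ret)
		return ret;

	/*
	 * With this patch, omap_start_dma() first clears CPC (OMAP15xx)
	 * or CDAC (other parts), so the transfer progress registers
	 * start from a known zero value.
	 */
	omap_start_dma(lch);

	/* ... wait for example_dma_cb() to report completion ... */

	/* On OMAP2+, the free path now goes through omap2_disable_irq_lch(). */
	omap_free_dma(lch);

	return 0;
}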