From 272ca655090978bdaa2630fc44fb2c03da5576fd Mon Sep 17 00:00:00 2001 From: Ira Snyder Date: Wed, 6 Jan 2010 13:33:59 +0000 Subject: fsldma: reduce kernel text size Some of the functions are written in a way where they use multiple reads and writes where a single read/write pair could suffice. This shrinks the kernel text size measurably, while making the functions easier to understand. add/remove: 0/0 grow/shrink: 1/4 up/down: 4/-196 (-192) function old new delta fsl_chan_set_request_count 120 124 +4 dma_halt 300 272 -28 fsl_chan_set_src_loop_size 208 156 -52 fsl_chan_set_dest_loop_size 208 156 -52 fsl_chan_xfer_ld_queue 500 436 -64 Signed-off-by: Ira W. Snyder Signed-off-by: Dan Williams --- drivers/dma/fsldma.c | 83 ++++++++++++++++++++++++++++------------------------ 1 file changed, 45 insertions(+), 38 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 296f9e747fa..0bad741765c 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -143,43 +143,45 @@ static int dma_is_idle(struct fsl_dma_chan *fsl_chan) static void dma_start(struct fsl_dma_chan *fsl_chan) { - u32 mr_set = 0; - - if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { - DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32); - mr_set |= FSL_DMA_MR_EMP_EN; - } else if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, - DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) - & ~FSL_DMA_MR_EMP_EN, 32); + u32 mode; + + mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32); + + if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { + if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { + DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32); + mode |= FSL_DMA_MR_EMP_EN; + } else { + mode &= ~FSL_DMA_MR_EMP_EN; + } } if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT) - mr_set |= FSL_DMA_MR_EMS_EN; + mode |= FSL_DMA_MR_EMS_EN; else - mr_set |= FSL_DMA_MR_CS; + mode |= FSL_DMA_MR_CS; - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, - DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) - | mr_set, 32); + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32); } static void dma_halt(struct fsl_dma_chan *fsl_chan) { + u32 mode; int i; - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, - DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA, - 32); - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, - DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS - | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32); + mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32); + mode |= FSL_DMA_MR_CA; + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32); + + mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA); + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32); for (i = 0; i < 100; i++) { if (dma_is_idle(fsl_chan)) break; udelay(10); } + if (i >= 100 && !dma_is_idle(fsl_chan)) dev_err(fsl_chan->dev, "DMA halt timeout!\n"); } @@ -231,22 +233,23 @@ static void append_ld_queue(struct fsl_dma_chan *fsl_chan, */ static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size) { + u32 mode; + + mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32); + switch (size) { case 0: - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, - DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & - (~FSL_DMA_MR_SAHE), 32); + mode &= ~FSL_DMA_MR_SAHE; break; case 1: case 2: case 4: case 8: - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, - DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | - FSL_DMA_MR_SAHE | (__ilog2(size) << 14), - 32); + mode |= FSL_DMA_MR_SAHE | 
(__ilog2(size) << 14); break; } + + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32); } /** @@ -262,22 +265,23 @@ static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size) */ static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size) { + u32 mode; + + mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32); + switch (size) { case 0: - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, - DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & - (~FSL_DMA_MR_DAHE), 32); + mode &= ~FSL_DMA_MR_DAHE; break; case 1: case 2: case 4: case 8: - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, - DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | - FSL_DMA_MR_DAHE | (__ilog2(size) << 16), - 32); + mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16); break; } + + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32); } /** @@ -294,11 +298,14 @@ static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size) */ static void fsl_chan_set_request_count(struct fsl_dma_chan *fsl_chan, int size) { + u32 mode; + BUG_ON(size > 1024); - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, - DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) - | ((__ilog2(size) << 24) & 0x0f000000), - 32); + + mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32); + mode |= (__ilog2(size) << 24) & 0x0f000000; + + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32); } /** -- cgit v1.2.3-70-g09d2 From 4ce0e953f6286777452bf07c83056342d6b9b257 Mon Sep 17 00:00:00 2001 From: Ira Snyder Date: Wed, 6 Jan 2010 13:34:00 +0000 Subject: fsldma: remove unused structure members Remove some unused members from the fsldma data structures. A few trivial uses of struct resource were converted to use the stack rather than keeping the memory allocated for the lifetime of the driver. Signed-off-by: Ira W. 
Snyder Signed-off-by: Dan Williams --- drivers/dma/fsldma.c | 17 ++++++++--------- drivers/dma/fsldma.h | 4 ---- 2 files changed, 8 insertions(+), 13 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 0bad741765c..0b4e6383f48 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -1072,6 +1072,7 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev, struct device_node *node, u32 feature, const char *compatible) { struct fsl_dma_chan *new_fsl_chan; + struct resource res; int err; /* alloc channel */ @@ -1083,7 +1084,7 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev, } /* get dma channel register base */ - err = of_address_to_resource(node, 0, &new_fsl_chan->reg); + err = of_address_to_resource(node, 0, &res); if (err) { dev_err(fdev->dev, "Can't get %s property 'reg'\n", node->full_name); @@ -1101,10 +1102,8 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev, WARN_ON(fdev->feature != new_fsl_chan->feature); new_fsl_chan->dev = fdev->dev; - new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start, - new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1); - - new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7; + new_fsl_chan->reg_base = ioremap(res.start, resource_size(&res)); + new_fsl_chan->id = ((res.start - 0x100) & 0xfff) >> 7; if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { dev_err(fdev->dev, "There is no %d channel!\n", new_fsl_chan->id); @@ -1183,6 +1182,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev, int err; struct fsl_dma_device *fdev; struct device_node *child; + struct resource res; fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL); if (!fdev) { @@ -1193,7 +1193,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev, INIT_LIST_HEAD(&fdev->common.channels); /* get DMA controller register base */ - err = of_address_to_resource(dev->node, 0, &fdev->reg); + err = of_address_to_resource(dev->node, 0, &res); if (err) { dev_err(&dev->dev, "Can't get %s property 'reg'\n", dev->node->full_name); @@ -1202,9 +1202,8 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev, dev_info(&dev->dev, "Probe the Freescale DMA driver for %s " "controller at 0x%llx...\n", - match->compatible, (unsigned long long)fdev->reg.start); - fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end - - fdev->reg.start + 1); + match->compatible, (unsigned long long)res.start); + fdev->reg_base = ioremap(res.start, resource_size(&res)); dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index 0df14cbb8ca..dbb5b5cce4c 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -92,8 +92,6 @@ struct fsl_desc_sw { struct list_head node; struct list_head tx_list; struct dma_async_tx_descriptor async_tx; - struct list_head *ld; - void *priv; } __attribute__((aligned(32))); struct fsl_dma_chan_regs { @@ -111,7 +109,6 @@ struct fsl_dma_chan; struct fsl_dma_device { void __iomem *reg_base; /* DGSR register base */ - struct resource reg; /* Resource for register */ struct device *dev; struct dma_device common; struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE]; @@ -138,7 +135,6 @@ struct fsl_dma_chan { struct dma_chan common; /* DMA common channel */ struct dma_pool *desc_pool; /* Descriptors pool */ struct device *dev; /* Channel device */ - struct resource reg; /* Resource for register */ int irq; /* Channel IRQ */ int id; /* 
Raw id of this channel */ struct tasklet_struct tasklet; -- cgit v1.2.3-70-g09d2 From a4f56d4b103d4e5d1a59a9118db0185a6bd1a83b Mon Sep 17 00:00:00 2001 From: Ira Snyder Date: Wed, 6 Jan 2010 13:34:01 +0000 Subject: fsldma: rename struct fsl_dma_chan to struct fsldma_chan This is the beginning of a cleanup which will change all instances of "fsl_dma" to "fsldma" to match the name of the driver itself. Signed-off-by: Ira W. Snyder Signed-off-by: Dan Williams --- drivers/dma/fsldma.c | 128 +++++++++++++++++++++++++++------------------------ drivers/dma/fsldma.h | 26 +++++------ 2 files changed, 81 insertions(+), 73 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 0b4e6383f48..6795d96e362 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -37,7 +37,7 @@ #include #include "fsldma.h" -static void dma_init(struct fsl_dma_chan *fsl_chan) +static void dma_init(struct fsldma_chan *fsl_chan) { /* Reset the channel */ DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32); @@ -64,23 +64,23 @@ static void dma_init(struct fsl_dma_chan *fsl_chan) } -static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val) +static void set_sr(struct fsldma_chan *fsl_chan, u32 val) { DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32); } -static u32 get_sr(struct fsl_dma_chan *fsl_chan) +static u32 get_sr(struct fsldma_chan *fsl_chan) { return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32); } -static void set_desc_cnt(struct fsl_dma_chan *fsl_chan, +static void set_desc_cnt(struct fsldma_chan *fsl_chan, struct fsl_dma_ld_hw *hw, u32 count) { hw->count = CPU_TO_DMA(fsl_chan, count, 32); } -static void set_desc_src(struct fsl_dma_chan *fsl_chan, +static void set_desc_src(struct fsldma_chan *fsl_chan, struct fsl_dma_ld_hw *hw, dma_addr_t src) { u64 snoop_bits; @@ -90,7 +90,7 @@ static void set_desc_src(struct fsl_dma_chan *fsl_chan, hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64); } -static void set_desc_dest(struct fsl_dma_chan *fsl_chan, +static void set_desc_dest(struct fsldma_chan *fsl_chan, struct fsl_dma_ld_hw *hw, dma_addr_t dest) { u64 snoop_bits; @@ -100,7 +100,7 @@ static void set_desc_dest(struct fsl_dma_chan *fsl_chan, hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64); } -static void set_desc_next(struct fsl_dma_chan *fsl_chan, +static void set_desc_next(struct fsldma_chan *fsl_chan, struct fsl_dma_ld_hw *hw, dma_addr_t next) { u64 snoop_bits; @@ -110,38 +110,38 @@ static void set_desc_next(struct fsl_dma_chan *fsl_chan, hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64); } -static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr) +static void set_cdar(struct fsldma_chan *fsl_chan, dma_addr_t addr) { DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64); } -static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan) +static dma_addr_t get_cdar(struct fsldma_chan *fsl_chan) { return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN; } -static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr) +static void set_ndar(struct fsldma_chan *fsl_chan, dma_addr_t addr) { DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64); } -static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan) +static dma_addr_t get_ndar(struct fsldma_chan *fsl_chan) { return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64); } -static u32 get_bcr(struct fsl_dma_chan *fsl_chan) +static u32 get_bcr(struct fsldma_chan *fsl_chan) { return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32); } -static int 
dma_is_idle(struct fsl_dma_chan *fsl_chan) +static int dma_is_idle(struct fsldma_chan *fsl_chan) { u32 sr = get_sr(fsl_chan); return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH); } -static void dma_start(struct fsl_dma_chan *fsl_chan) +static void dma_start(struct fsldma_chan *fsl_chan) { u32 mode; @@ -164,7 +164,7 @@ static void dma_start(struct fsl_dma_chan *fsl_chan) DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32); } -static void dma_halt(struct fsl_dma_chan *fsl_chan) +static void dma_halt(struct fsldma_chan *fsl_chan) { u32 mode; int i; @@ -186,7 +186,7 @@ static void dma_halt(struct fsl_dma_chan *fsl_chan) dev_err(fsl_chan->dev, "DMA halt timeout!\n"); } -static void set_ld_eol(struct fsl_dma_chan *fsl_chan, +static void set_ld_eol(struct fsldma_chan *fsl_chan, struct fsl_desc_sw *desc) { u64 snoop_bits; @@ -199,7 +199,7 @@ static void set_ld_eol(struct fsl_dma_chan *fsl_chan, | snoop_bits, 64); } -static void append_ld_queue(struct fsl_dma_chan *fsl_chan, +static void append_ld_queue(struct fsldma_chan *fsl_chan, struct fsl_desc_sw *new_desc) { struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev); @@ -231,7 +231,7 @@ static void append_ld_queue(struct fsl_dma_chan *fsl_chan, * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA, * SA + 1 ... and so on. */ -static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size) +static void fsl_chan_set_src_loop_size(struct fsldma_chan *fsl_chan, int size) { u32 mode; @@ -263,7 +263,7 @@ static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size) * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA, * TA + 1 ... and so on. */ -static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size) +static void fsl_chan_set_dest_loop_size(struct fsldma_chan *fsl_chan, int size) { u32 mode; @@ -296,7 +296,7 @@ static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size) * * A size of 0 disables external pause control. The maximum size is 1024. */ -static void fsl_chan_set_request_count(struct fsl_dma_chan *fsl_chan, int size) +static void fsl_chan_set_request_count(struct fsldma_chan *fsl_chan, int size) { u32 mode; @@ -317,7 +317,7 @@ static void fsl_chan_set_request_count(struct fsl_dma_chan *fsl_chan, int size) * The DMA Request Count feature should be used in addition to this feature * to set the number of bytes to transfer before pausing the channel. */ -static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable) +static void fsl_chan_toggle_ext_pause(struct fsldma_chan *fsl_chan, int enable) { if (enable) fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT; @@ -335,7 +335,7 @@ static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable) * transfer immediately. The DMA channel will wait for the * control pin asserted. 
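 * (Concretely: with external start enabled, dma_start() sets
 * FSL_DMA_MR_EMS_EN instead of FSL_DMA_MR_CS, so the transfer begins
 * when the start pin fires rather than on the register write.)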
*/ -static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable) +static void fsl_chan_toggle_ext_start(struct fsldma_chan *fsl_chan, int enable) { if (enable) fsl_chan->feature |= FSL_DMA_CHAN_START_EXT; @@ -345,7 +345,7 @@ static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable) static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) { - struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan); + struct fsldma_chan *fsl_chan = to_fsl_chan(tx->chan); struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); struct fsl_desc_sw *child; unsigned long flags; @@ -379,7 +379,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) * Return - The descriptor allocated. NULL for failed. */ static struct fsl_desc_sw *fsl_dma_alloc_descriptor( - struct fsl_dma_chan *fsl_chan) + struct fsldma_chan *fsl_chan) { dma_addr_t pdesc; struct fsl_desc_sw *desc_sw; @@ -408,7 +408,7 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor( */ static int fsl_dma_alloc_chan_resources(struct dma_chan *chan) { - struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); + struct fsldma_chan *fsl_chan = to_fsl_chan(chan); /* Has this channel already been allocated? */ if (fsl_chan->desc_pool) @@ -435,7 +435,7 @@ static int fsl_dma_alloc_chan_resources(struct dma_chan *chan) */ static void fsl_dma_free_chan_resources(struct dma_chan *chan) { - struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); + struct fsldma_chan *fsl_chan = to_fsl_chan(chan); struct fsl_desc_sw *desc, *_desc; unsigned long flags; @@ -459,7 +459,7 @@ static void fsl_dma_free_chan_resources(struct dma_chan *chan) static struct dma_async_tx_descriptor * fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags) { - struct fsl_dma_chan *fsl_chan; + struct fsldma_chan *fsl_chan; struct fsl_desc_sw *new; if (!chan) @@ -489,7 +489,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, size_t len, unsigned long flags) { - struct fsl_dma_chan *fsl_chan; + struct fsldma_chan *fsl_chan; struct fsl_desc_sw *first = NULL, *prev = NULL, *new; struct list_head *list; size_t copy; @@ -575,7 +575,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_data_direction direction, unsigned long flags) { - struct fsl_dma_chan *fsl_chan; + struct fsldma_chan *fsl_chan; struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL; struct fsl_dma_slave *slave; struct list_head *tx_list; @@ -759,7 +759,7 @@ fail: static void fsl_dma_device_terminate_all(struct dma_chan *chan) { - struct fsl_dma_chan *fsl_chan; + struct fsldma_chan *fsl_chan; struct fsl_desc_sw *desc, *tmp; unsigned long flags; @@ -786,7 +786,7 @@ static void fsl_dma_device_terminate_all(struct dma_chan *chan) * fsl_dma_update_completed_cookie - Update the completed cookie. * @fsl_chan : Freescale DMA channel */ -static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan) +static void fsl_dma_update_completed_cookie(struct fsldma_chan *fsl_chan) { struct fsl_desc_sw *cur_desc, *desc; dma_addr_t ld_phy; @@ -820,7 +820,7 @@ static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan) * If 'in_intr' is set, the function will move the link descriptor to * the recycle list. Otherwise, free it directly. 
*/ -static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan) +static void fsl_chan_ld_cleanup(struct fsldma_chan *fsl_chan) { struct fsl_desc_sw *desc, *_desc; unsigned long flags; @@ -864,7 +864,7 @@ static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan) * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue. * @fsl_chan : Freescale DMA channel */ -static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan) +static void fsl_chan_xfer_ld_queue(struct fsldma_chan *fsl_chan) { struct list_head *ld_node; dma_addr_t next_dest_addr; @@ -912,7 +912,7 @@ out_unlock: */ static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan) { - struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); + struct fsldma_chan *fsl_chan = to_fsl_chan(chan); #ifdef FSL_DMA_LD_DEBUG struct fsl_desc_sw *ld; @@ -949,7 +949,7 @@ static enum dma_status fsl_dma_is_complete(struct dma_chan *chan, dma_cookie_t *done, dma_cookie_t *used) { - struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); + struct fsldma_chan *fsl_chan = to_fsl_chan(chan); dma_cookie_t last_used; dma_cookie_t last_complete; @@ -969,7 +969,7 @@ static enum dma_status fsl_dma_is_complete(struct dma_chan *chan, static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data) { - struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data; + struct fsldma_chan *fsl_chan = data; u32 stat; int update_cookie = 0; int xfer_ld_q = 0; @@ -1050,9 +1050,9 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data) static irqreturn_t fsl_dma_do_interrupt(int irq, void *data) { - struct fsl_dma_device *fdev = (struct fsl_dma_device *)data; - u32 gsr; + struct fsldma_device *fdev = data; int ch_nr; + u32 gsr; gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base) : in_le32(fdev->reg_base); @@ -1064,19 +1064,23 @@ static irqreturn_t fsl_dma_do_interrupt(int irq, void *data) static void dma_do_tasklet(unsigned long data) { - struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data; + struct fsldma_chan *fsl_chan = (struct fsldma_chan *)data; fsl_chan_ld_cleanup(fsl_chan); } -static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev, +/*----------------------------------------------------------------------------*/ +/* OpenFirmware Subsystem */ +/*----------------------------------------------------------------------------*/ + +static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev, struct device_node *node, u32 feature, const char *compatible) { - struct fsl_dma_chan *new_fsl_chan; + struct fsldma_chan *new_fsl_chan; struct resource res; int err; /* alloc channel */ - new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL); + new_fsl_chan = kzalloc(sizeof(*new_fsl_chan), GFP_KERNEL); if (!new_fsl_chan) { dev_err(fdev->dev, "No free memory for allocating " "dma channels!\n"); @@ -1167,7 +1171,7 @@ err_no_reg: return err; } -static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan) +static void fsl_dma_chan_remove(struct fsldma_chan *fchan) { if (fchan->irq != NO_IRQ) free_irq(fchan->irq, fchan); @@ -1176,15 +1180,15 @@ static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan) kfree(fchan); } -static int __devinit of_fsl_dma_probe(struct of_device *dev, +static int __devinit fsldma_of_probe(struct of_device *dev, const struct of_device_id *match) { int err; - struct fsl_dma_device *fdev; + struct fsldma_device *fdev; struct device_node *child; struct resource res; - fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL); + fdev = kzalloc(sizeof(*fdev), GFP_KERNEL); if (!fdev) 
{ dev_err(&dev->dev, "No enough memory for 'priv'\n"); return -ENOMEM; @@ -1256,9 +1260,9 @@ err_no_reg: return err; } -static int of_fsl_dma_remove(struct of_device *of_dev) +static int fsldma_of_remove(struct of_device *of_dev) { - struct fsl_dma_device *fdev; + struct fsldma_device *fdev; unsigned int i; fdev = dev_get_drvdata(&of_dev->dev); @@ -1280,39 +1284,43 @@ static int of_fsl_dma_remove(struct of_device *of_dev) return 0; } -static struct of_device_id of_fsl_dma_ids[] = { +static struct of_device_id fsldma_of_ids[] = { { .compatible = "fsl,eloplus-dma", }, { .compatible = "fsl,elo-dma", }, {} }; -static struct of_platform_driver of_fsl_dma_driver = { - .name = "fsl-elo-dma", - .match_table = of_fsl_dma_ids, - .probe = of_fsl_dma_probe, - .remove = of_fsl_dma_remove, +static struct of_platform_driver fsldma_of_driver = { + .name = "fsl-elo-dma", + .match_table = fsldma_of_ids, + .probe = fsldma_of_probe, + .remove = fsldma_of_remove, }; -static __init int of_fsl_dma_init(void) +/*----------------------------------------------------------------------------*/ +/* Module Init / Exit */ +/*----------------------------------------------------------------------------*/ + +static __init int fsldma_init(void) { int ret; pr_info("Freescale Elo / Elo Plus DMA driver\n"); - ret = of_register_platform_driver(&of_fsl_dma_driver); + ret = of_register_platform_driver(&fsldma_of_driver); if (ret) pr_err("fsldma: failed to register platform driver\n"); return ret; } -static void __exit of_fsl_dma_exit(void) +static void __exit fsldma_exit(void) { - of_unregister_platform_driver(&of_fsl_dma_driver); + of_unregister_platform_driver(&fsldma_of_driver); } -subsys_initcall(of_fsl_dma_init); -module_exit(of_fsl_dma_exit); +subsys_initcall(fsldma_init); +module_exit(fsldma_exit); MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index dbb5b5cce4c..f8c2baa6f41 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -94,7 +94,7 @@ struct fsl_desc_sw { struct dma_async_tx_descriptor async_tx; } __attribute__((aligned(32))); -struct fsl_dma_chan_regs { +struct fsldma_chan_regs { u32 mr; /* 0x00 - Mode Register */ u32 sr; /* 0x04 - Status Register */ u64 cdar; /* 0x08 - Current descriptor address register */ @@ -104,19 +104,19 @@ struct fsl_dma_chan_regs { u64 ndar; /* 0x24 - Next Descriptor Address Register */ }; -struct fsl_dma_chan; +struct fsldma_chan; #define FSL_DMA_MAX_CHANS_PER_DEVICE 4 -struct fsl_dma_device { +struct fsldma_device { void __iomem *reg_base; /* DGSR register base */ struct device *dev; struct dma_device common; - struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE]; + struct fsldma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE]; u32 feature; /* The same as DMA channels */ int irq; /* Channel IRQ */ }; -/* Define macros for fsl_dma_chan->feature property */ +/* Define macros for fsldma_chan->feature property */ #define FSL_DMA_LITTLE_ENDIAN 0x00000000 #define FSL_DMA_BIG_ENDIAN 0x00000001 @@ -127,8 +127,8 @@ struct fsl_dma_device { #define FSL_DMA_CHAN_PAUSE_EXT 0x00001000 #define FSL_DMA_CHAN_START_EXT 0x00002000 -struct fsl_dma_chan { - struct fsl_dma_chan_regs __iomem *reg_base; +struct fsldma_chan { + struct fsldma_chan_regs __iomem *reg_base; dma_cookie_t completed_cookie; /* The maximum cookie completed */ spinlock_t desc_lock; /* Descriptor operation lock */ struct list_head ld_queue; /* Link descriptors queue */ @@ -140,14 +140,14 @@ struct fsl_dma_chan { struct tasklet_struct tasklet; 
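	/* feature flags: FSL_DMA_IP_*, FSL_DMA_*_ENDIAN and the
	 * FSL_DMA_CHAN_* external start/pause bits defined above */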
u32 feature; - void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int enable); - void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable); - void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size); - void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size); - void (*set_request_count)(struct fsl_dma_chan *fsl_chan, int size); + void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable); + void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable); + void (*set_src_loop_size)(struct fsldma_chan *fsl_chan, int size); + void (*set_dest_loop_size)(struct fsldma_chan *fsl_chan, int size); + void (*set_request_count)(struct fsldma_chan *fsl_chan, int size); }; -#define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common) +#define to_fsl_chan(chan) container_of(chan, struct fsldma_chan, common) #define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node) #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx) -- cgit v1.2.3-70-g09d2 From 738f5f7e1ae876448cb7d9c82bea258b69386647 Mon Sep 17 00:00:00 2001 From: Ira Snyder Date: Wed, 6 Jan 2010 13:34:02 +0000 Subject: fsldma: rename dest to dst for uniformity Most functions in the standard library use "dst" as a parameter, rather than "dest". This renames all use of "dest" to "dst" to match the usual convention. Signed-off-by: Ira W. Snyder Signed-off-by: Dan Williams --- drivers/dma/fsldma.c | 32 ++++++++++++++++---------------- drivers/dma/fsldma.h | 2 +- 2 files changed, 17 insertions(+), 17 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 6795d96e362..c2db7541c22 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -90,14 +90,14 @@ static void set_desc_src(struct fsldma_chan *fsl_chan, hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64); } -static void set_desc_dest(struct fsldma_chan *fsl_chan, - struct fsl_dma_ld_hw *hw, dma_addr_t dest) +static void set_desc_dst(struct fsldma_chan *fsl_chan, + struct fsl_dma_ld_hw *hw, dma_addr_t dst) { u64 snoop_bits; snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; - hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64); + hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dst, 64); } static void set_desc_next(struct fsldma_chan *fsl_chan, @@ -253,7 +253,7 @@ static void fsl_chan_set_src_loop_size(struct fsldma_chan *fsl_chan, int size) } /** - * fsl_chan_set_dest_loop_size - Set destination address hold transfer size + * fsl_chan_set_dst_loop_size - Set destination address hold transfer size * @fsl_chan : Freescale DMA channel * @size : Address loop size, 0 for disable loop * @@ -263,7 +263,7 @@ static void fsl_chan_set_src_loop_size(struct fsldma_chan *fsl_chan, int size) * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA, * TA + 1 ... and so on. 
*/ -static void fsl_chan_set_dest_loop_size(struct fsldma_chan *fsl_chan, int size) +static void fsl_chan_set_dst_loop_size(struct fsldma_chan *fsl_chan, int size) { u32 mode; @@ -486,7 +486,7 @@ fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags) } static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( - struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, + struct dma_chan *chan, dma_addr_t dma_dst, dma_addr_t dma_src, size_t len, unsigned long flags) { struct fsldma_chan *fsl_chan; @@ -519,7 +519,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( set_desc_cnt(fsl_chan, &new->hw, copy); set_desc_src(fsl_chan, &new->hw, dma_src); - set_desc_dest(fsl_chan, &new->hw, dma_dest); + set_desc_dst(fsl_chan, &new->hw, dma_dst); if (!first) first = new; @@ -532,7 +532,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( prev = new; len -= copy; dma_src += copy; - dma_dest += copy; + dma_dst += copy; /* Insert the link descriptor to the LD ring */ list_add_tail(&new->node, &first->tx_list); @@ -680,7 +680,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( /* Fill in the descriptor */ set_desc_cnt(fsl_chan, &new->hw, copy); set_desc_src(fsl_chan, &new->hw, dma_src); - set_desc_dest(fsl_chan, &new->hw, dma_dst); + set_desc_dst(fsl_chan, &new->hw, dma_dst); /* * If this is not the first descriptor, chain the @@ -721,8 +721,8 @@ finished: if (fsl_chan->set_src_loop_size) fsl_chan->set_src_loop_size(fsl_chan, slave->src_loop_size); - if (fsl_chan->set_dest_loop_size) - fsl_chan->set_dest_loop_size(fsl_chan, slave->dst_loop_size); + if (fsl_chan->set_dst_loop_size) + fsl_chan->set_dst_loop_size(fsl_chan, slave->dst_loop_size); if (fsl_chan->toggle_ext_start) fsl_chan->toggle_ext_start(fsl_chan, slave->external_start); @@ -867,7 +867,7 @@ static void fsl_chan_ld_cleanup(struct fsldma_chan *fsl_chan) static void fsl_chan_xfer_ld_queue(struct fsldma_chan *fsl_chan) { struct list_head *ld_node; - dma_addr_t next_dest_addr; + dma_addr_t next_dst_addr; unsigned long flags; spin_lock_irqsave(&fsl_chan->desc_lock, flags); @@ -892,10 +892,10 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *fsl_chan) if (ld_node != &fsl_chan->ld_queue) { /* Get the ld start address from ld_queue */ - next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys; + next_dst_addr = to_fsl_desc(ld_node)->async_tx.phys; dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%llx\n", - (unsigned long long)next_dest_addr); - set_cdar(fsl_chan, next_dest_addr); + (unsigned long long)next_dst_addr); + set_cdar(fsl_chan, next_dst_addr); dma_start(fsl_chan); } else { set_cdar(fsl_chan, 0); @@ -1130,7 +1130,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev, case FSL_DMA_IP_83XX: new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start; new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size; - new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size; + new_fsl_chan->set_dst_loop_size = fsl_chan_set_dst_loop_size; new_fsl_chan->set_request_count = fsl_chan_set_request_count; } diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index f8c2baa6f41..a67b8e3df0f 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -143,7 +143,7 @@ struct fsldma_chan { void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable); void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable); void (*set_src_loop_size)(struct fsldma_chan *fsl_chan, int size); - void (*set_dest_loop_size)(struct fsldma_chan *fsl_chan, int size); + void 
(*set_dst_loop_size)(struct fsldma_chan *fsl_chan, int size); void (*set_request_count)(struct fsldma_chan *fsl_chan, int size); }; -- cgit v1.2.3-70-g09d2 From e7a29151de1bd52081f27f149b68074fac0323be Mon Sep 17 00:00:00 2001 From: Ira Snyder Date: Wed, 6 Jan 2010 13:34:03 +0000 Subject: fsldma: clean up the OF subsystem routines This fixes some errors in the cleanup paths of the OF subsystem, including missing checks for ioremap failing. Also, some variables were renamed for brevity. Signed-off-by: Ira W. Snyder Signed-off-by: Dan Williams --- drivers/dma/fsldma.c | 259 ++++++++++++++++++++++++++------------------------- drivers/dma/fsldma.h | 4 +- 2 files changed, 134 insertions(+), 129 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index c2db7541c22..507b29716bb 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -40,7 +40,7 @@ static void dma_init(struct fsldma_chan *fsl_chan) { /* Reset the channel */ - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32); + DMA_OUT(fsl_chan, &fsl_chan->regs->mr, 0, 32); switch (fsl_chan->feature & FSL_DMA_IP_MASK) { case FSL_DMA_IP_85XX: @@ -49,7 +49,7 @@ static void dma_init(struct fsldma_chan *fsl_chan) * EOSIE - End of segments interrupt enable (basic mode) * EOLNIE - End of links interrupt enable */ - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE + DMA_OUT(fsl_chan, &fsl_chan->regs->mr, FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32); break; case FSL_DMA_IP_83XX: @@ -57,7 +57,7 @@ static void dma_init(struct fsldma_chan *fsl_chan) * EOTIE - End-of-transfer interrupt enable * PRC_RM - PCI read multiple */ - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE + DMA_OUT(fsl_chan, &fsl_chan->regs->mr, FSL_DMA_MR_EOTIE | FSL_DMA_MR_PRC_RM, 32); break; } @@ -66,12 +66,12 @@ static void dma_init(struct fsldma_chan *fsl_chan) static void set_sr(struct fsldma_chan *fsl_chan, u32 val) { - DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32); + DMA_OUT(fsl_chan, &fsl_chan->regs->sr, val, 32); } static u32 get_sr(struct fsldma_chan *fsl_chan) { - return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32); + return DMA_IN(fsl_chan, &fsl_chan->regs->sr, 32); } static void set_desc_cnt(struct fsldma_chan *fsl_chan, @@ -112,27 +112,27 @@ static void set_desc_next(struct fsldma_chan *fsl_chan, static void set_cdar(struct fsldma_chan *fsl_chan, dma_addr_t addr) { - DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64); + DMA_OUT(fsl_chan, &fsl_chan->regs->cdar, addr | FSL_DMA_SNEN, 64); } static dma_addr_t get_cdar(struct fsldma_chan *fsl_chan) { - return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN; + return DMA_IN(fsl_chan, &fsl_chan->regs->cdar, 64) & ~FSL_DMA_SNEN; } static void set_ndar(struct fsldma_chan *fsl_chan, dma_addr_t addr) { - DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64); + DMA_OUT(fsl_chan, &fsl_chan->regs->ndar, addr, 64); } static dma_addr_t get_ndar(struct fsldma_chan *fsl_chan) { - return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64); + return DMA_IN(fsl_chan, &fsl_chan->regs->ndar, 64); } static u32 get_bcr(struct fsldma_chan *fsl_chan) { - return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32); + return DMA_IN(fsl_chan, &fsl_chan->regs->bcr, 32); } static int dma_is_idle(struct fsldma_chan *fsl_chan) @@ -145,11 +145,11 @@ static void dma_start(struct fsldma_chan *fsl_chan) { u32 mode; - mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32); + mode = DMA_IN(fsl_chan, &fsl_chan->regs->mr, 32); if ((fsl_chan->feature & 
FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { - DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32); + DMA_OUT(fsl_chan, &fsl_chan->regs->bcr, 0, 32); mode |= FSL_DMA_MR_EMP_EN; } else { mode &= ~FSL_DMA_MR_EMP_EN; @@ -161,7 +161,7 @@ static void dma_start(struct fsldma_chan *fsl_chan) else mode |= FSL_DMA_MR_CS; - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32); + DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32); } static void dma_halt(struct fsldma_chan *fsl_chan) @@ -169,12 +169,12 @@ static void dma_halt(struct fsldma_chan *fsl_chan) u32 mode; int i; - mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32); + mode = DMA_IN(fsl_chan, &fsl_chan->regs->mr, 32); mode |= FSL_DMA_MR_CA; - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32); + DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32); mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA); - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32); + DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32); for (i = 0; i < 100; i++) { if (dma_is_idle(fsl_chan)) @@ -235,7 +235,7 @@ static void fsl_chan_set_src_loop_size(struct fsldma_chan *fsl_chan, int size) { u32 mode; - mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32); + mode = DMA_IN(fsl_chan, &fsl_chan->regs->mr, 32); switch (size) { case 0: @@ -249,7 +249,7 @@ static void fsl_chan_set_src_loop_size(struct fsldma_chan *fsl_chan, int size) break; } - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32); + DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32); } /** @@ -267,7 +267,7 @@ static void fsl_chan_set_dst_loop_size(struct fsldma_chan *fsl_chan, int size) { u32 mode; - mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32); + mode = DMA_IN(fsl_chan, &fsl_chan->regs->mr, 32); switch (size) { case 0: @@ -281,7 +281,7 @@ static void fsl_chan_set_dst_loop_size(struct fsldma_chan *fsl_chan, int size) break; } - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32); + DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32); } /** @@ -302,10 +302,10 @@ static void fsl_chan_set_request_count(struct fsldma_chan *fsl_chan, int size) BUG_ON(size > 1024); - mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32); + mode = DMA_IN(fsl_chan, &fsl_chan->regs->mr, 32); mode |= (__ilog2(size) << 24) & 0x0f000000; - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32); + DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32); } /** @@ -967,7 +967,7 @@ static enum dma_status fsl_dma_is_complete(struct dma_chan *chan, return dma_async_is_complete(cookie, last_complete, last_used); } -static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data) +static irqreturn_t fsldma_chan_irq(int irq, void *data) { struct fsldma_chan *fsl_chan = data; u32 stat; @@ -1048,17 +1048,17 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data) return IRQ_HANDLED; } -static irqreturn_t fsl_dma_do_interrupt(int irq, void *data) +static irqreturn_t fsldma_irq(int irq, void *data) { struct fsldma_device *fdev = data; int ch_nr; u32 gsr; - gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base) - : in_le32(fdev->reg_base); + gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs) + : in_le32(fdev->regs); ch_nr = (32 - ffs(gsr)) / 8; - return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq, + return fdev->chan[ch_nr] ? 
fsldma_chan_irq(irq, fdev->chan[ch_nr]) : IRQ_NONE; } @@ -1075,140 +1075,142 @@ static void dma_do_tasklet(unsigned long data) static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev, struct device_node *node, u32 feature, const char *compatible) { - struct fsldma_chan *new_fsl_chan; + struct fsldma_chan *fchan; struct resource res; int err; /* alloc channel */ - new_fsl_chan = kzalloc(sizeof(*new_fsl_chan), GFP_KERNEL); - if (!new_fsl_chan) { - dev_err(fdev->dev, "No free memory for allocating " - "dma channels!\n"); - return -ENOMEM; + fchan = kzalloc(sizeof(*fchan), GFP_KERNEL); + if (!fchan) { + dev_err(fdev->dev, "no free memory for DMA channels!\n"); + err = -ENOMEM; + goto out_return; + } + + /* ioremap registers for use */ + fchan->regs = of_iomap(node, 0); + if (!fchan->regs) { + dev_err(fdev->dev, "unable to ioremap registers\n"); + err = -ENOMEM; + goto out_free_fchan; } - /* get dma channel register base */ err = of_address_to_resource(node, 0, &res); if (err) { - dev_err(fdev->dev, "Can't get %s property 'reg'\n", - node->full_name); - goto err_no_reg; + dev_err(fdev->dev, "unable to find 'reg' property\n"); + goto out_iounmap_regs; } - new_fsl_chan->feature = feature; - + fchan->feature = feature; if (!fdev->feature) - fdev->feature = new_fsl_chan->feature; + fdev->feature = fchan->feature; - /* If the DMA device's feature is different than its channels', - * report the bug. + /* + * If the DMA device's feature is different than the feature + * of its channels, report the bug */ - WARN_ON(fdev->feature != new_fsl_chan->feature); - - new_fsl_chan->dev = fdev->dev; - new_fsl_chan->reg_base = ioremap(res.start, resource_size(&res)); - new_fsl_chan->id = ((res.start - 0x100) & 0xfff) >> 7; - if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { - dev_err(fdev->dev, "There is no %d channel!\n", - new_fsl_chan->id); + WARN_ON(fdev->feature != fchan->feature); + + fchan->dev = fdev->dev; + fchan->id = ((res.start - 0x100) & 0xfff) >> 7; + if (fchan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { + dev_err(fdev->dev, "too many channels for device\n"); err = -EINVAL; - goto err_no_chan; + goto out_iounmap_regs; } - fdev->chan[new_fsl_chan->id] = new_fsl_chan; - tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet, - (unsigned long)new_fsl_chan); - /* Init the channel */ - dma_init(new_fsl_chan); + fdev->chan[fchan->id] = fchan; + tasklet_init(&fchan->tasklet, dma_do_tasklet, (unsigned long)fchan); + + /* Initialize the channel */ + dma_init(fchan); /* Clear cdar registers */ - set_cdar(new_fsl_chan, 0); + set_cdar(fchan, 0); - switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) { + switch (fchan->feature & FSL_DMA_IP_MASK) { case FSL_DMA_IP_85XX: - new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; + fchan->toggle_ext_pause = fsl_chan_toggle_ext_pause; case FSL_DMA_IP_83XX: - new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start; - new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size; - new_fsl_chan->set_dst_loop_size = fsl_chan_set_dst_loop_size; - new_fsl_chan->set_request_count = fsl_chan_set_request_count; + fchan->toggle_ext_start = fsl_chan_toggle_ext_start; + fchan->set_src_loop_size = fsl_chan_set_src_loop_size; + fchan->set_dst_loop_size = fsl_chan_set_dst_loop_size; + fchan->set_request_count = fsl_chan_set_request_count; } - spin_lock_init(&new_fsl_chan->desc_lock); - INIT_LIST_HEAD(&new_fsl_chan->ld_queue); + spin_lock_init(&fchan->desc_lock); + INIT_LIST_HEAD(&fchan->ld_queue); - new_fsl_chan->common.device = &fdev->common; + fchan->common.device = 
&fdev->common; /* Add the channel to DMA device channel list */ - list_add_tail(&new_fsl_chan->common.device_node, - &fdev->common.channels); + list_add_tail(&fchan->common.device_node, &fdev->common.channels); fdev->common.chancnt++; - new_fsl_chan->irq = irq_of_parse_and_map(node, 0); - if (new_fsl_chan->irq != NO_IRQ) { - err = request_irq(new_fsl_chan->irq, - &fsl_dma_chan_do_interrupt, IRQF_SHARED, - "fsldma-channel", new_fsl_chan); + fchan->irq = irq_of_parse_and_map(node, 0); + if (fchan->irq != NO_IRQ) { + err = request_irq(fchan->irq, &fsldma_chan_irq, + IRQF_SHARED, "fsldma-channel", fchan); if (err) { - dev_err(fdev->dev, "DMA channel %s request_irq error " - "with return %d\n", node->full_name, err); - goto err_no_irq; + dev_err(fdev->dev, "unable to request IRQ " + "for channel %d\n", fchan->id); + goto out_list_del; } } - dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id, - compatible, - new_fsl_chan->irq != NO_IRQ ? new_fsl_chan->irq : fdev->irq); + dev_info(fdev->dev, "#%d (%s), irq %d\n", fchan->id, compatible, + fchan->irq != NO_IRQ ? fchan->irq : fdev->irq); return 0; -err_no_irq: - list_del(&new_fsl_chan->common.device_node); -err_no_chan: - iounmap(new_fsl_chan->reg_base); -err_no_reg: - kfree(new_fsl_chan); +out_list_del: + irq_dispose_mapping(fchan->irq); + list_del_init(&fchan->common.device_node); +out_iounmap_regs: + iounmap(fchan->regs); +out_free_fchan: + kfree(fchan); +out_return: return err; } static void fsl_dma_chan_remove(struct fsldma_chan *fchan) { - if (fchan->irq != NO_IRQ) + if (fchan->irq != NO_IRQ) { free_irq(fchan->irq, fchan); + irq_dispose_mapping(fchan->irq); + } + list_del(&fchan->common.device_node); - iounmap(fchan->reg_base); + iounmap(fchan->regs); kfree(fchan); } -static int __devinit fsldma_of_probe(struct of_device *dev, +static int __devinit fsldma_of_probe(struct of_device *op, const struct of_device_id *match) { - int err; struct fsldma_device *fdev; struct device_node *child; - struct resource res; + int err; fdev = kzalloc(sizeof(*fdev), GFP_KERNEL); if (!fdev) { - dev_err(&dev->dev, "No enough memory for 'priv'\n"); - return -ENOMEM; + dev_err(&op->dev, "No enough memory for 'priv'\n"); + err = -ENOMEM; + goto out_return; } - fdev->dev = &dev->dev; + + fdev->dev = &op->dev; INIT_LIST_HEAD(&fdev->common.channels); - /* get DMA controller register base */ - err = of_address_to_resource(dev->node, 0, &res); - if (err) { - dev_err(&dev->dev, "Can't get %s property 'reg'\n", - dev->node->full_name); - goto err_no_reg; + /* ioremap the registers for use */ + fdev->regs = of_iomap(op->node, 0); + if (!fdev->regs) { + dev_err(&op->dev, "unable to ioremap registers\n"); + err = -ENOMEM; + goto out_free_fdev; } - dev_info(&dev->dev, "Probe the Freescale DMA driver for %s " - "controller at 0x%llx...\n", - match->compatible, (unsigned long long)res.start); - fdev->reg_base = ioremap(res.start, resource_size(&res)); - dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); dma_cap_set(DMA_SLAVE, fdev->common.cap_mask); @@ -1220,66 +1222,69 @@ static int __devinit fsldma_of_probe(struct of_device *dev, fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg; fdev->common.device_terminate_all = fsl_dma_device_terminate_all; - fdev->common.dev = &dev->dev; + fdev->common.dev = &op->dev; - fdev->irq = irq_of_parse_and_map(dev->node, 0); + fdev->irq = irq_of_parse_and_map(op->node, 0); if (fdev->irq != NO_IRQ) { - err = 
request_irq(fdev->irq, &fsl_dma_do_interrupt, IRQF_SHARED, - "fsldma-device", fdev); + err = request_irq(fdev->irq, &fsldma_irq, IRQF_SHARED, + "fsldma-device", fdev); if (err) { - dev_err(&dev->dev, "DMA device request_irq error " - "with return %d\n", err); - goto err; + dev_err(&op->dev, "unable to request IRQ\n"); + goto out_iounmap_regs; } } - dev_set_drvdata(&(dev->dev), fdev); + dev_set_drvdata(&op->dev, fdev); - /* We cannot use of_platform_bus_probe() because there is no - * of_platform_bus_remove. Instead, we manually instantiate every DMA + /* + * We cannot use of_platform_bus_probe() because there is no + * of_platform_bus_remove(). Instead, we manually instantiate every DMA * channel object. */ - for_each_child_of_node(dev->node, child) { - if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) + for_each_child_of_node(op->node, child) { + if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) { fsl_dma_chan_probe(fdev, child, FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN, "fsl,eloplus-dma-channel"); - if (of_device_is_compatible(child, "fsl,elo-dma-channel")) + } + + if (of_device_is_compatible(child, "fsl,elo-dma-channel")) { fsl_dma_chan_probe(fdev, child, FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN, "fsl,elo-dma-channel"); + } } dma_async_device_register(&fdev->common); return 0; -err: - iounmap(fdev->reg_base); -err_no_reg: +out_iounmap_regs: + iounmap(fdev->regs); +out_free_fdev: kfree(fdev); +out_return: return err; } -static int fsldma_of_remove(struct of_device *of_dev) +static int fsldma_of_remove(struct of_device *op) { struct fsldma_device *fdev; unsigned int i; - fdev = dev_get_drvdata(&of_dev->dev); - + fdev = dev_get_drvdata(&op->dev); dma_async_device_unregister(&fdev->common); - for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) + for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { if (fdev->chan[i]) fsl_dma_chan_remove(fdev->chan[i]); + } if (fdev->irq != NO_IRQ) free_irq(fdev->irq, fdev); - iounmap(fdev->reg_base); - + iounmap(fdev->regs); + dev_set_drvdata(&op->dev, NULL); kfree(fdev); - dev_set_drvdata(&of_dev->dev, NULL); return 0; } diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index a67b8e3df0f..ea3b19c8708 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -108,7 +108,7 @@ struct fsldma_chan; #define FSL_DMA_MAX_CHANS_PER_DEVICE 4 struct fsldma_device { - void __iomem *reg_base; /* DGSR register base */ + void __iomem *regs; /* DGSR register base */ struct device *dev; struct dma_device common; struct fsldma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE]; @@ -128,7 +128,7 @@ struct fsldma_device { #define FSL_DMA_CHAN_START_EXT 0x00002000 struct fsldma_chan { - struct fsldma_chan_regs __iomem *reg_base; + struct fsldma_chan_regs __iomem *regs; dma_cookie_t completed_cookie; /* The maximum cookie completed */ spinlock_t desc_lock; /* Descriptor operation lock */ struct list_head ld_queue; /* Link descriptors queue */ -- cgit v1.2.3-70-g09d2 From d3f620b2c4fecdc8e060b70e8d92d29fc01c6126 Mon Sep 17 00:00:00 2001 From: Ira Snyder Date: Wed, 6 Jan 2010 13:34:04 +0000 Subject: fsldma: simplify IRQ probing and handling The IRQ probing is needlessly complex. All off the 83xx device trees in arch/powerpc/boot/dts/ specify 5 interrupts per DMA controller: one for the controller, and one for each channel. These interrupts are all attached to the same IRQ line. This causes an interesting situation if two channels interrupt at the same time. 
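(For reference, the per-controller dispatch being removed boils down to a single-channel lookup -- condensed from the old fsldma_irq() in the diff below, where each channel owns one byte of the DGSR:

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
						   : in_le32(fdev->regs);
	ch_nr = (32 - ffs(gsr)) / 8;	/* first pending channel only */
	return fdev->chan[ch_nr] ? fsldma_chan_irq(irq, fdev->chan[ch_nr])
				 : IRQ_NONE;

ffs() selects exactly one set bit, so only one channel is serviced per invocation.)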
The per-controller handler will handle the first channel, and the per-channel handler will handle the remaining channels. Instead of this mess, we fix the bug in the per-controller handler, and make it handle all channels that generated an interrupt. When a per-controller handler is specified in the device tree, we prefer to use the shared handler instead of the per-channel handler. The 85xx/86xx controllers do not have a per-controller interrupt, and instead use a per-channel interrupt. This behavior has not been changed. Signed-off-by: Ira W. Snyder Signed-off-by: Dan Williams --- Documentation/powerpc/dts-bindings/fsl/dma.txt | 8 ++ drivers/dma/fsldma.c | 173 ++++++++++++++++++------- 2 files changed, 137 insertions(+), 44 deletions(-) (limited to 'drivers/dma') diff --git a/Documentation/powerpc/dts-bindings/fsl/dma.txt b/Documentation/powerpc/dts-bindings/fsl/dma.txt index 0732cdd05ba..2a4b4bce611 100644 --- a/Documentation/powerpc/dts-bindings/fsl/dma.txt +++ b/Documentation/powerpc/dts-bindings/fsl/dma.txt @@ -44,21 +44,29 @@ Example: compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel"; cell-index = <0>; reg = <0 0x80>; + interrupt-parent = <&ipic>; + interrupts = <71 8>; }; dma-channel@80 { compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel"; cell-index = <1>; reg = <0x80 0x80>; + interrupt-parent = <&ipic>; + interrupts = <71 8>; }; dma-channel@100 { compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel"; cell-index = <2>; reg = <0x100 0x80>; + interrupt-parent = <&ipic>; + interrupts = <71 8>; }; dma-channel@180 { compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel"; cell-index = <3>; reg = <0x180 0x80>; + interrupt-parent = <&ipic>; + interrupts = <71 8>; }; }; diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 507b29716bb..6a905929ef0 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -967,6 +967,10 @@ static enum dma_status fsl_dma_is_complete(struct dma_chan *chan, return dma_async_is_complete(cookie, last_complete, last_used); } +/*----------------------------------------------------------------------------*/ +/* Interrupt Handling */ +/*----------------------------------------------------------------------------*/ + static irqreturn_t fsldma_chan_irq(int irq, void *data) { struct fsldma_chan *fsl_chan = data; @@ -1048,24 +1052,116 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) return IRQ_HANDLED; } -static irqreturn_t fsldma_irq(int irq, void *data) +static void dma_do_tasklet(unsigned long data) +{ + struct fsldma_chan *fsl_chan = (struct fsldma_chan *)data; + fsl_chan_ld_cleanup(fsl_chan); +} + +static irqreturn_t fsldma_ctrl_irq(int irq, void *data) { struct fsldma_device *fdev = data; - int ch_nr; - u32 gsr; + struct fsldma_chan *chan; + unsigned int handled = 0; + u32 gsr, mask; + int i; gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs) - : in_le32(fdev->regs); - ch_nr = (32 - ffs(gsr)) / 8; + : in_le32(fdev->regs); + mask = 0xff000000; + dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr); - return fdev->chan[ch_nr] ? 
fsldma_chan_irq(irq, - fdev->chan[ch_nr]) : IRQ_NONE; + for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { + chan = fdev->chan[i]; + if (!chan) + continue; + + if (gsr & mask) { + dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id); + fsldma_chan_irq(irq, chan); + handled++; + } + + gsr &= ~mask; + mask >>= 8; + } + + return IRQ_RETVAL(handled); } -static void dma_do_tasklet(unsigned long data) +static void fsldma_free_irqs(struct fsldma_device *fdev) { - struct fsldma_chan *fsl_chan = (struct fsldma_chan *)data; - fsl_chan_ld_cleanup(fsl_chan); + struct fsldma_chan *chan; + int i; + + if (fdev->irq != NO_IRQ) { + dev_dbg(fdev->dev, "free per-controller IRQ\n"); + free_irq(fdev->irq, fdev); + return; + } + + for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { + chan = fdev->chan[i]; + if (chan && chan->irq != NO_IRQ) { + dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id); + free_irq(chan->irq, chan); + } + } +} + +static int fsldma_request_irqs(struct fsldma_device *fdev) +{ + struct fsldma_chan *chan; + int ret; + int i; + + /* if we have a per-controller IRQ, use that */ + if (fdev->irq != NO_IRQ) { + dev_dbg(fdev->dev, "request per-controller IRQ\n"); + ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED, + "fsldma-controller", fdev); + return ret; + } + + /* no per-controller IRQ, use the per-channel IRQs */ + for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { + chan = fdev->chan[i]; + if (!chan) + continue; + + if (chan->irq == NO_IRQ) { + dev_err(fdev->dev, "no interrupts property defined for " + "DMA channel %d. Please fix your " + "device tree\n", chan->id); + ret = -ENODEV; + goto out_unwind; + } + + dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id); + ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED, + "fsldma-chan", chan); + if (ret) { + dev_err(fdev->dev, "unable to request IRQ for DMA " + "channel %d\n", chan->id); + goto out_unwind; + } + } + + return 0; + +out_unwind: + for (/* none */; i >= 0; i--) { + chan = fdev->chan[i]; + if (!chan) + continue; + + if (chan->irq == NO_IRQ) + continue; + + free_irq(chan->irq, chan); + } + + return ret; } /*----------------------------------------------------------------------------*/ @@ -1143,29 +1239,18 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev, fchan->common.device = &fdev->common; + /* find the IRQ line, if it exists in the device tree */ + fchan->irq = irq_of_parse_and_map(node, 0); + /* Add the channel to DMA device channel list */ list_add_tail(&fchan->common.device_node, &fdev->common.channels); fdev->common.chancnt++; - fchan->irq = irq_of_parse_and_map(node, 0); - if (fchan->irq != NO_IRQ) { - err = request_irq(fchan->irq, &fsldma_chan_irq, - IRQF_SHARED, "fsldma-channel", fchan); - if (err) { - dev_err(fdev->dev, "unable to request IRQ " - "for channel %d\n", fchan->id); - goto out_list_del; - } - } - dev_info(fdev->dev, "#%d (%s), irq %d\n", fchan->id, compatible, fchan->irq != NO_IRQ ? 
fchan->irq : fdev->irq); return 0; -out_list_del: - irq_dispose_mapping(fchan->irq); - list_del_init(&fchan->common.device_node); out_iounmap_regs: iounmap(fchan->regs); out_free_fchan: @@ -1176,11 +1261,7 @@ out_return: static void fsl_dma_chan_remove(struct fsldma_chan *fchan) { - if (fchan->irq != NO_IRQ) { - free_irq(fchan->irq, fchan); - irq_dispose_mapping(fchan->irq); - } - + irq_dispose_mapping(fchan->irq); list_del(&fchan->common.device_node); iounmap(fchan->regs); kfree(fchan); @@ -1211,6 +1292,9 @@ static int __devinit fsldma_of_probe(struct of_device *op, goto out_free_fdev; } + /* map the channel IRQ if it exists, but don't hookup the handler yet */ + fdev->irq = irq_of_parse_and_map(op->node, 0); + dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); dma_cap_set(DMA_SLAVE, fdev->common.cap_mask); @@ -1224,16 +1308,6 @@ static int __devinit fsldma_of_probe(struct of_device *op, fdev->common.device_terminate_all = fsl_dma_device_terminate_all; fdev->common.dev = &op->dev; - fdev->irq = irq_of_parse_and_map(op->node, 0); - if (fdev->irq != NO_IRQ) { - err = request_irq(fdev->irq, &fsldma_irq, IRQF_SHARED, - "fsldma-device", fdev); - if (err) { - dev_err(&op->dev, "unable to request IRQ\n"); - goto out_iounmap_regs; - } - } - dev_set_drvdata(&op->dev, fdev); /* @@ -1255,12 +1329,24 @@ static int __devinit fsldma_of_probe(struct of_device *op, } } + /* + * Hookup the IRQ handler(s) + * + * If we have a per-controller interrupt, we prefer that to the + * per-channel interrupts to reduce the number of shared interrupt + * handlers on the same IRQ line + */ + err = fsldma_request_irqs(fdev); + if (err) { + dev_err(fdev->dev, "unable to request IRQs\n"); + goto out_free_fdev; + } + dma_async_device_register(&fdev->common); return 0; -out_iounmap_regs: - iounmap(fdev->regs); out_free_fdev: + irq_dispose_mapping(fdev->irq); kfree(fdev); out_return: return err; @@ -1274,14 +1360,13 @@ static int fsldma_of_remove(struct of_device *op) fdev = dev_get_drvdata(&op->dev); dma_async_device_unregister(&fdev->common); + fsldma_free_irqs(fdev); + for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { if (fdev->chan[i]) fsl_dma_chan_remove(fdev->chan[i]); } - if (fdev->irq != NO_IRQ) - free_irq(fdev->irq, fdev); - iounmap(fdev->regs); dev_set_drvdata(&op->dev, NULL); kfree(fdev); -- cgit v1.2.3-70-g09d2 From a1c03319018061304be28d131073ac13a5cb86fb Mon Sep 17 00:00:00 2001 From: Ira Snyder Date: Wed, 6 Jan 2010 13:34:05 +0000 Subject: fsldma: rename fsl_chan to chan The name fsl_chan seems too long, so it has been shortened to chan. There are only a few places where the higher level "struct dma_chan *chan" name conflicts. These have been changed to "struct dma_chan *dchan" instead. Signed-off-by: Ira W. 
Snyder Signed-off-by: Dan Williams --- drivers/dma/fsldma.c | 550 +++++++++++++++++++++++++-------------------------- 1 file changed, 275 insertions(+), 275 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 6a905929ef0..7b5f88cb495 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -37,19 +37,19 @@ #include #include "fsldma.h" -static void dma_init(struct fsldma_chan *fsl_chan) +static void dma_init(struct fsldma_chan *chan) { /* Reset the channel */ - DMA_OUT(fsl_chan, &fsl_chan->regs->mr, 0, 32); + DMA_OUT(chan, &chan->regs->mr, 0, 32); - switch (fsl_chan->feature & FSL_DMA_IP_MASK) { + switch (chan->feature & FSL_DMA_IP_MASK) { case FSL_DMA_IP_85XX: /* Set the channel to below modes: * EIE - Error interrupt enable * EOSIE - End of segments interrupt enable (basic mode) * EOLNIE - End of links interrupt enable */ - DMA_OUT(fsl_chan, &fsl_chan->regs->mr, FSL_DMA_MR_EIE + DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32); break; case FSL_DMA_IP_83XX: @@ -57,154 +57,154 @@ static void dma_init(struct fsldma_chan *fsl_chan) * EOTIE - End-of-transfer interrupt enable * PRC_RM - PCI read multiple */ - DMA_OUT(fsl_chan, &fsl_chan->regs->mr, FSL_DMA_MR_EOTIE + DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE | FSL_DMA_MR_PRC_RM, 32); break; } } -static void set_sr(struct fsldma_chan *fsl_chan, u32 val) +static void set_sr(struct fsldma_chan *chan, u32 val) { - DMA_OUT(fsl_chan, &fsl_chan->regs->sr, val, 32); + DMA_OUT(chan, &chan->regs->sr, val, 32); } -static u32 get_sr(struct fsldma_chan *fsl_chan) +static u32 get_sr(struct fsldma_chan *chan) { - return DMA_IN(fsl_chan, &fsl_chan->regs->sr, 32); + return DMA_IN(chan, &chan->regs->sr, 32); } -static void set_desc_cnt(struct fsldma_chan *fsl_chan, +static void set_desc_cnt(struct fsldma_chan *chan, struct fsl_dma_ld_hw *hw, u32 count) { - hw->count = CPU_TO_DMA(fsl_chan, count, 32); + hw->count = CPU_TO_DMA(chan, count, 32); } -static void set_desc_src(struct fsldma_chan *fsl_chan, +static void set_desc_src(struct fsldma_chan *chan, struct fsl_dma_ld_hw *hw, dma_addr_t src) { u64 snoop_bits; - snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) + snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0; - hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64); + hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64); } -static void set_desc_dst(struct fsldma_chan *fsl_chan, +static void set_desc_dst(struct fsldma_chan *chan, struct fsl_dma_ld_hw *hw, dma_addr_t dst) { u64 snoop_bits; - snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) + snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; - hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dst, 64); + hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64); } -static void set_desc_next(struct fsldma_chan *fsl_chan, +static void set_desc_next(struct fsldma_chan *chan, struct fsl_dma_ld_hw *hw, dma_addr_t next) { u64 snoop_bits; - snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) + snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) ? 
FSL_DMA_SNEN : 0; - hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64); + hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64); } -static void set_cdar(struct fsldma_chan *fsl_chan, dma_addr_t addr) +static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr) { - DMA_OUT(fsl_chan, &fsl_chan->regs->cdar, addr | FSL_DMA_SNEN, 64); + DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64); } -static dma_addr_t get_cdar(struct fsldma_chan *fsl_chan) +static dma_addr_t get_cdar(struct fsldma_chan *chan) { - return DMA_IN(fsl_chan, &fsl_chan->regs->cdar, 64) & ~FSL_DMA_SNEN; + return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN; } -static void set_ndar(struct fsldma_chan *fsl_chan, dma_addr_t addr) +static void set_ndar(struct fsldma_chan *chan, dma_addr_t addr) { - DMA_OUT(fsl_chan, &fsl_chan->regs->ndar, addr, 64); + DMA_OUT(chan, &chan->regs->ndar, addr, 64); } -static dma_addr_t get_ndar(struct fsldma_chan *fsl_chan) +static dma_addr_t get_ndar(struct fsldma_chan *chan) { - return DMA_IN(fsl_chan, &fsl_chan->regs->ndar, 64); + return DMA_IN(chan, &chan->regs->ndar, 64); } -static u32 get_bcr(struct fsldma_chan *fsl_chan) +static u32 get_bcr(struct fsldma_chan *chan) { - return DMA_IN(fsl_chan, &fsl_chan->regs->bcr, 32); + return DMA_IN(chan, &chan->regs->bcr, 32); } -static int dma_is_idle(struct fsldma_chan *fsl_chan) +static int dma_is_idle(struct fsldma_chan *chan) { - u32 sr = get_sr(fsl_chan); + u32 sr = get_sr(chan); return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH); } -static void dma_start(struct fsldma_chan *fsl_chan) +static void dma_start(struct fsldma_chan *chan) { u32 mode; - mode = DMA_IN(fsl_chan, &fsl_chan->regs->mr, 32); + mode = DMA_IN(chan, &chan->regs->mr, 32); - if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { - if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { - DMA_OUT(fsl_chan, &fsl_chan->regs->bcr, 0, 32); + if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { + if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { + DMA_OUT(chan, &chan->regs->bcr, 0, 32); mode |= FSL_DMA_MR_EMP_EN; } else { mode &= ~FSL_DMA_MR_EMP_EN; } } - if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT) + if (chan->feature & FSL_DMA_CHAN_START_EXT) mode |= FSL_DMA_MR_EMS_EN; else mode |= FSL_DMA_MR_CS; - DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32); + DMA_OUT(chan, &chan->regs->mr, mode, 32); } -static void dma_halt(struct fsldma_chan *fsl_chan) +static void dma_halt(struct fsldma_chan *chan) { u32 mode; int i; - mode = DMA_IN(fsl_chan, &fsl_chan->regs->mr, 32); + mode = DMA_IN(chan, &chan->regs->mr, 32); mode |= FSL_DMA_MR_CA; - DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32); + DMA_OUT(chan, &chan->regs->mr, mode, 32); mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA); - DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32); + DMA_OUT(chan, &chan->regs->mr, mode, 32); for (i = 0; i < 100; i++) { - if (dma_is_idle(fsl_chan)) + if (dma_is_idle(chan)) break; udelay(10); } - if (i >= 100 && !dma_is_idle(fsl_chan)) - dev_err(fsl_chan->dev, "DMA halt timeout!\n"); + if (i >= 100 && !dma_is_idle(chan)) + dev_err(chan->dev, "DMA halt timeout!\n"); } -static void set_ld_eol(struct fsldma_chan *fsl_chan, +static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc) { u64 snoop_bits; - snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) + snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) ? 
FSL_DMA_SNEN : 0; - desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan, - DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL + desc->hw.next_ln_addr = CPU_TO_DMA(chan, + DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL | snoop_bits, 64); } -static void append_ld_queue(struct fsldma_chan *fsl_chan, +static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *new_desc) { - struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev); + struct fsl_desc_sw *queue_tail = to_fsl_desc(chan->ld_queue.prev); - if (list_empty(&fsl_chan->ld_queue)) + if (list_empty(&chan->ld_queue)) return; /* Link to the new descriptor physical address and @@ -214,15 +214,15 @@ static void append_ld_queue(struct fsldma_chan *fsl_chan, * * For FSL_DMA_IP_83xx, the snoop enable bit need be set. */ - queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan, + queue_tail->hw.next_ln_addr = CPU_TO_DMA(chan, new_desc->async_tx.phys | FSL_DMA_EOSIE | - (((fsl_chan->feature & FSL_DMA_IP_MASK) + (((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64); } /** * fsl_chan_set_src_loop_size - Set source address hold transfer size - * @fsl_chan : Freescale DMA channel + * @chan : Freescale DMA channel * @size : Address loop size, 0 for disable loop * * The set source address hold transfer size. The source @@ -231,11 +231,11 @@ static void append_ld_queue(struct fsldma_chan *fsl_chan, * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA, * SA + 1 ... and so on. */ -static void fsl_chan_set_src_loop_size(struct fsldma_chan *fsl_chan, int size) +static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size) { u32 mode; - mode = DMA_IN(fsl_chan, &fsl_chan->regs->mr, 32); + mode = DMA_IN(chan, &chan->regs->mr, 32); switch (size) { case 0: @@ -249,12 +249,12 @@ static void fsl_chan_set_src_loop_size(struct fsldma_chan *fsl_chan, int size) break; } - DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32); + DMA_OUT(chan, &chan->regs->mr, mode, 32); } /** * fsl_chan_set_dst_loop_size - Set destination address hold transfer size - * @fsl_chan : Freescale DMA channel + * @chan : Freescale DMA channel * @size : Address loop size, 0 for disable loop * * The set destination address hold transfer size. The destination @@ -263,11 +263,11 @@ static void fsl_chan_set_src_loop_size(struct fsldma_chan *fsl_chan, int size) * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA, * TA + 1 ... and so on. */ -static void fsl_chan_set_dst_loop_size(struct fsldma_chan *fsl_chan, int size) +static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size) { u32 mode; - mode = DMA_IN(fsl_chan, &fsl_chan->regs->mr, 32); + mode = DMA_IN(chan, &chan->regs->mr, 32); switch (size) { case 0: @@ -281,12 +281,12 @@ static void fsl_chan_set_dst_loop_size(struct fsldma_chan *fsl_chan, int size) break; } - DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32); + DMA_OUT(chan, &chan->regs->mr, mode, 32); } /** * fsl_chan_set_request_count - Set DMA Request Count for external control - * @fsl_chan : Freescale DMA channel + * @chan : Freescale DMA channel * @size : Number of bytes to transfer in a single request * * The Freescale DMA channel can be controlled by the external signal DREQ#. @@ -296,38 +296,38 @@ static void fsl_chan_set_dst_loop_size(struct fsldma_chan *fsl_chan, int size) * * A size of 0 disables external pause control. The maximum size is 1024. 
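For a concrete instance of the encoding implemented just below: a request count of 1024 bytes stores __ilog2(1024) = 10 into bits 27:24 of the mode register. A minimal sketch of that arithmetic (illustration only, not part of the patch):

	u32 field = (__ilog2(1024) << 24) & 0x0f000000;	/* 10 << 24 == 0x0a000000 */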
*/ -static void fsl_chan_set_request_count(struct fsldma_chan *fsl_chan, int size) +static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size) { u32 mode; BUG_ON(size > 1024); - mode = DMA_IN(fsl_chan, &fsl_chan->regs->mr, 32); + mode = DMA_IN(chan, &chan->regs->mr, 32); mode |= (__ilog2(size) << 24) & 0x0f000000; - DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32); + DMA_OUT(chan, &chan->regs->mr, mode, 32); } /** * fsl_chan_toggle_ext_pause - Toggle channel external pause status - * @fsl_chan : Freescale DMA channel + * @chan : Freescale DMA channel * @enable : 0 is disabled, 1 is enabled. * * The Freescale DMA channel can be controlled by the external signal DREQ#. * The DMA Request Count feature should be used in addition to this feature * to set the number of bytes to transfer before pausing the channel. */ -static void fsl_chan_toggle_ext_pause(struct fsldma_chan *fsl_chan, int enable) +static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable) { if (enable) - fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT; + chan->feature |= FSL_DMA_CHAN_PAUSE_EXT; else - fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT; + chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT; } /** * fsl_chan_toggle_ext_start - Toggle channel external start status - * @fsl_chan : Freescale DMA channel + * @chan : Freescale DMA channel * @enable : 0 is disabled, 1 is enabled. * * If enable the external start, the channel can be started by an @@ -335,26 +335,26 @@ static void fsl_chan_toggle_ext_pause(struct fsldma_chan *fsl_chan, int enable) * transfer immediately. The DMA channel will wait for the * control pin asserted. */ -static void fsl_chan_toggle_ext_start(struct fsldma_chan *fsl_chan, int enable) +static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable) { if (enable) - fsl_chan->feature |= FSL_DMA_CHAN_START_EXT; + chan->feature |= FSL_DMA_CHAN_START_EXT; else - fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT; + chan->feature &= ~FSL_DMA_CHAN_START_EXT; } static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) { - struct fsldma_chan *fsl_chan = to_fsl_chan(tx->chan); + struct fsldma_chan *chan = to_fsl_chan(tx->chan); struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); struct fsl_desc_sw *child; unsigned long flags; dma_cookie_t cookie; /* cookie increment and adding to ld_queue must be atomic */ - spin_lock_irqsave(&fsl_chan->desc_lock, flags); + spin_lock_irqsave(&chan->desc_lock, flags); - cookie = fsl_chan->common.cookie; + cookie = chan->common.cookie; list_for_each_entry(child, &desc->tx_list, node) { cookie++; if (cookie < 0) @@ -363,33 +363,33 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) desc->async_tx.cookie = cookie; } - fsl_chan->common.cookie = cookie; - append_ld_queue(fsl_chan, desc); - list_splice_init(&desc->tx_list, fsl_chan->ld_queue.prev); + chan->common.cookie = cookie; + append_ld_queue(chan, desc); + list_splice_init(&desc->tx_list, chan->ld_queue.prev); - spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); + spin_unlock_irqrestore(&chan->desc_lock, flags); return cookie; } /** * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool. - * @fsl_chan : Freescale DMA channel + * @chan : Freescale DMA channel * * Return - The descriptor allocated. NULL for failed. 
*/ static struct fsl_desc_sw *fsl_dma_alloc_descriptor( - struct fsldma_chan *fsl_chan) + struct fsldma_chan *chan) { dma_addr_t pdesc; struct fsl_desc_sw *desc_sw; - desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc); + desc_sw = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); if (desc_sw) { memset(desc_sw, 0, sizeof(struct fsl_desc_sw)); INIT_LIST_HEAD(&desc_sw->tx_list); dma_async_tx_descriptor_init(&desc_sw->async_tx, - &fsl_chan->common); + &chan->common); desc_sw->async_tx.tx_submit = fsl_dma_tx_submit; desc_sw->async_tx.phys = pdesc; } @@ -400,29 +400,29 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor( /** * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. - * @fsl_chan : Freescale DMA channel + * @chan : Freescale DMA channel * * This function will create a dma pool for descriptor allocation. * * Return - The number of descriptors allocated. */ -static int fsl_dma_alloc_chan_resources(struct dma_chan *chan) +static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan) { - struct fsldma_chan *fsl_chan = to_fsl_chan(chan); + struct fsldma_chan *chan = to_fsl_chan(dchan); /* Has this channel already been allocated? */ - if (fsl_chan->desc_pool) + if (chan->desc_pool) return 1; /* We need the descriptor to be aligned to 32bytes * for meeting FSL DMA specification requirement. */ - fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", - fsl_chan->dev, sizeof(struct fsl_desc_sw), + chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", + chan->dev, sizeof(struct fsl_desc_sw), 32, 0); - if (!fsl_chan->desc_pool) { - dev_err(fsl_chan->dev, "No memory for channel %d " - "descriptor dma pool.\n", fsl_chan->id); + if (!chan->desc_pool) { + dev_err(chan->dev, "No memory for channel %d " + "descriptor dma pool.\n", chan->id); return 0; } @@ -431,45 +431,45 @@ static int fsl_dma_alloc_chan_resources(struct dma_chan *chan) /** * fsl_dma_free_chan_resources - Free all resources of the channel. 
- * @fsl_chan : Freescale DMA channel + * @chan : Freescale DMA channel */ -static void fsl_dma_free_chan_resources(struct dma_chan *chan) +static void fsl_dma_free_chan_resources(struct dma_chan *dchan) { - struct fsldma_chan *fsl_chan = to_fsl_chan(chan); + struct fsldma_chan *chan = to_fsl_chan(dchan); struct fsl_desc_sw *desc, *_desc; unsigned long flags; - dev_dbg(fsl_chan->dev, "Free all channel resources.\n"); - spin_lock_irqsave(&fsl_chan->desc_lock, flags); - list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) { + dev_dbg(chan->dev, "Free all channel resources.\n"); + spin_lock_irqsave(&chan->desc_lock, flags); + list_for_each_entry_safe(desc, _desc, &chan->ld_queue, node) { #ifdef FSL_DMA_LD_DEBUG - dev_dbg(fsl_chan->dev, + dev_dbg(chan->dev, "LD %p will be released.\n", desc); #endif list_del(&desc->node); /* free link descriptor */ - dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); + dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); } - spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); - dma_pool_destroy(fsl_chan->desc_pool); + spin_unlock_irqrestore(&chan->desc_lock, flags); + dma_pool_destroy(chan->desc_pool); - fsl_chan->desc_pool = NULL; + chan->desc_pool = NULL; } static struct dma_async_tx_descriptor * -fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags) +fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags) { - struct fsldma_chan *fsl_chan; + struct fsldma_chan *chan; struct fsl_desc_sw *new; - if (!chan) + if (!dchan) return NULL; - fsl_chan = to_fsl_chan(chan); + chan = to_fsl_chan(dchan); - new = fsl_dma_alloc_descriptor(fsl_chan); + new = fsl_dma_alloc_descriptor(chan); if (!new) { - dev_err(fsl_chan->dev, "No free memory for link descriptor\n"); + dev_err(chan->dev, "No free memory for link descriptor\n"); return NULL; } @@ -480,51 +480,51 @@ fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags) list_add_tail(&new->node, &new->tx_list); /* Set End-of-link to the last link descriptor of new list*/ - set_ld_eol(fsl_chan, new); + set_ld_eol(chan, new); return &new->async_tx; } static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( - struct dma_chan *chan, dma_addr_t dma_dst, dma_addr_t dma_src, + struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src, size_t len, unsigned long flags) { - struct fsldma_chan *fsl_chan; + struct fsldma_chan *chan; struct fsl_desc_sw *first = NULL, *prev = NULL, *new; struct list_head *list; size_t copy; - if (!chan) + if (!dchan) return NULL; if (!len) return NULL; - fsl_chan = to_fsl_chan(chan); + chan = to_fsl_chan(dchan); do { /* Allocate the link descriptor from DMA pool */ - new = fsl_dma_alloc_descriptor(fsl_chan); + new = fsl_dma_alloc_descriptor(chan); if (!new) { - dev_err(fsl_chan->dev, + dev_err(chan->dev, "No free memory for link descriptor\n"); goto fail; } #ifdef FSL_DMA_LD_DEBUG - dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new); + dev_dbg(chan->dev, "new link desc alloc %p\n", new); #endif copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT); - set_desc_cnt(fsl_chan, &new->hw, copy); - set_desc_src(fsl_chan, &new->hw, dma_src); - set_desc_dst(fsl_chan, &new->hw, dma_dst); + set_desc_cnt(chan, &new->hw, copy); + set_desc_src(chan, &new->hw, dma_src); + set_desc_dst(chan, &new->hw, dma_dst); if (!first) first = new; else - set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys); + set_desc_next(chan, &prev->hw, new->async_tx.phys); new->async_tx.cookie = 0; async_tx_ack(&new->async_tx); @@ -542,7 +542,7 @@ static struct 
dma_async_tx_descriptor *fsl_dma_prep_memcpy( new->async_tx.cookie = -EBUSY; /* Set End-of-link to the last link descriptor of new list*/ - set_ld_eol(fsl_chan, new); + set_ld_eol(chan, new); return &first->async_tx; @@ -553,7 +553,7 @@ fail: list = &first->tx_list; list_for_each_entry_safe_reverse(new, prev, list, node) { list_del(&new->node); - dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys); + dma_pool_free(chan->desc_pool, new, new->async_tx.phys); } return NULL; @@ -572,10 +572,10 @@ fail: * chan->private variable. */ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( - struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, + struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, enum dma_data_direction direction, unsigned long flags) { - struct fsldma_chan *fsl_chan; + struct fsldma_chan *chan; struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL; struct fsl_dma_slave *slave; struct list_head *tx_list; @@ -588,14 +588,14 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( struct fsl_dma_hw_addr *hw; dma_addr_t dma_dst, dma_src; - if (!chan) + if (!dchan) return NULL; - if (!chan->private) + if (!dchan->private) return NULL; - fsl_chan = to_fsl_chan(chan); - slave = chan->private; + chan = to_fsl_chan(dchan); + slave = dchan->private; if (list_empty(&slave->addresses)) return NULL; @@ -644,14 +644,14 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( } /* Allocate the link descriptor from DMA pool */ - new = fsl_dma_alloc_descriptor(fsl_chan); + new = fsl_dma_alloc_descriptor(chan); if (!new) { - dev_err(fsl_chan->dev, "No free memory for " + dev_err(chan->dev, "No free memory for " "link descriptor\n"); goto fail; } #ifdef FSL_DMA_LD_DEBUG - dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new); + dev_dbg(chan->dev, "new link desc alloc %p\n", new); #endif /* @@ -678,9 +678,9 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( } /* Fill in the descriptor */ - set_desc_cnt(fsl_chan, &new->hw, copy); - set_desc_src(fsl_chan, &new->hw, dma_src); - set_desc_dst(fsl_chan, &new->hw, dma_dst); + set_desc_cnt(chan, &new->hw, copy); + set_desc_src(chan, &new->hw, dma_src); + set_desc_dst(chan, &new->hw, dma_dst); /* * If this is not the first descriptor, chain the @@ -689,7 +689,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( if (!first) { first = new; } else { - set_desc_next(fsl_chan, &prev->hw, + set_desc_next(chan, &prev->hw, new->async_tx.phys); } @@ -715,23 +715,23 @@ finished: new->async_tx.cookie = -EBUSY; /* Set End-of-link to the last link descriptor of new list */ - set_ld_eol(fsl_chan, new); + set_ld_eol(chan, new); /* Enable extra controller features */ - if (fsl_chan->set_src_loop_size) - fsl_chan->set_src_loop_size(fsl_chan, slave->src_loop_size); + if (chan->set_src_loop_size) + chan->set_src_loop_size(chan, slave->src_loop_size); - if (fsl_chan->set_dst_loop_size) - fsl_chan->set_dst_loop_size(fsl_chan, slave->dst_loop_size); + if (chan->set_dst_loop_size) + chan->set_dst_loop_size(chan, slave->dst_loop_size); - if (fsl_chan->toggle_ext_start) - fsl_chan->toggle_ext_start(fsl_chan, slave->external_start); + if (chan->toggle_ext_start) + chan->toggle_ext_start(chan, slave->external_start); - if (fsl_chan->toggle_ext_pause) - fsl_chan->toggle_ext_pause(fsl_chan, slave->external_pause); + if (chan->toggle_ext_pause) + chan->toggle_ext_pause(chan, slave->external_pause); - if (fsl_chan->set_request_count) - fsl_chan->set_request_count(fsl_chan, 
slave->request_count); + if (chan->set_request_count) + chan->set_request_count(chan, slave->request_count); return &first->async_tx; @@ -751,62 +751,62 @@ fail: tx_list = &first->tx_list; list_for_each_entry_safe_reverse(new, prev, tx_list, node) { list_del_init(&new->node); - dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys); + dma_pool_free(chan->desc_pool, new, new->async_tx.phys); } return NULL; } -static void fsl_dma_device_terminate_all(struct dma_chan *chan) +static void fsl_dma_device_terminate_all(struct dma_chan *dchan) { - struct fsldma_chan *fsl_chan; + struct fsldma_chan *chan; struct fsl_desc_sw *desc, *tmp; unsigned long flags; - if (!chan) + if (!dchan) return; - fsl_chan = to_fsl_chan(chan); + chan = to_fsl_chan(dchan); /* Halt the DMA engine */ - dma_halt(fsl_chan); + dma_halt(chan); - spin_lock_irqsave(&fsl_chan->desc_lock, flags); + spin_lock_irqsave(&chan->desc_lock, flags); /* Remove and free all of the descriptors in the LD queue */ - list_for_each_entry_safe(desc, tmp, &fsl_chan->ld_queue, node) { + list_for_each_entry_safe(desc, tmp, &chan->ld_queue, node) { list_del(&desc->node); - dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); + dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); } - spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); + spin_unlock_irqrestore(&chan->desc_lock, flags); } /** * fsl_dma_update_completed_cookie - Update the completed cookie. - * @fsl_chan : Freescale DMA channel + * @chan : Freescale DMA channel */ -static void fsl_dma_update_completed_cookie(struct fsldma_chan *fsl_chan) +static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan) { struct fsl_desc_sw *cur_desc, *desc; dma_addr_t ld_phy; - ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK; + ld_phy = get_cdar(chan) & FSL_DMA_NLDA_MASK; if (ld_phy) { cur_desc = NULL; - list_for_each_entry(desc, &fsl_chan->ld_queue, node) + list_for_each_entry(desc, &chan->ld_queue, node) if (desc->async_tx.phys == ld_phy) { cur_desc = desc; break; } if (cur_desc && cur_desc->async_tx.cookie) { - if (dma_is_idle(fsl_chan)) - fsl_chan->completed_cookie = + if (dma_is_idle(chan)) + chan->completed_cookie = cur_desc->async_tx.cookie; else - fsl_chan->completed_cookie = + chan->completed_cookie = cur_desc->async_tx.cookie - 1; } } @@ -814,27 +814,27 @@ static void fsl_dma_update_completed_cookie(struct fsldma_chan *fsl_chan) /** * fsl_chan_ld_cleanup - Clean up link descriptors - * @fsl_chan : Freescale DMA channel + * @chan : Freescale DMA channel * * This function clean up the ld_queue of DMA channel. * If 'in_intr' is set, the function will move the link descriptor to * the recycle list. Otherwise, free it directly. 
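The completion test used by the cleanup loop below is the generic cookie comparison from dmaengine.h; a simplified sketch of its core logic (reconstructed from the era's helper; the function name here is local to this sketch):

	/* sketch: a descriptor is done if its cookie lies outside the
	 * (last_complete, last_used] window of in-flight cookies,
	 * with the else branch handling cookie wraparound */
	static enum dma_status is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
	{
		if (last_complete <= last_used) {
			if (cookie <= last_complete || cookie > last_used)
				return DMA_SUCCESS;
		} else {
			if (cookie <= last_complete && cookie > last_used)
				return DMA_SUCCESS;
		}
		return DMA_IN_PROGRESS;
	}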
*/ -static void fsl_chan_ld_cleanup(struct fsldma_chan *fsl_chan) +static void fsl_chan_ld_cleanup(struct fsldma_chan *chan) { struct fsl_desc_sw *desc, *_desc; unsigned long flags; - spin_lock_irqsave(&fsl_chan->desc_lock, flags); + spin_lock_irqsave(&chan->desc_lock, flags); - dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n", - fsl_chan->completed_cookie); - list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) { + dev_dbg(chan->dev, "chan completed_cookie = %d\n", + chan->completed_cookie); + list_for_each_entry_safe(desc, _desc, &chan->ld_queue, node) { dma_async_tx_callback callback; void *callback_param; if (dma_async_is_complete(desc->async_tx.cookie, - fsl_chan->completed_cookie, fsl_chan->common.cookie) + chan->completed_cookie, chan->common.cookie) == DMA_IN_PROGRESS) break; @@ -844,119 +844,119 @@ static void fsl_chan_ld_cleanup(struct fsldma_chan *fsl_chan) /* Remove from ld_queue list */ list_del(&desc->node); - dev_dbg(fsl_chan->dev, "link descriptor %p will be recycle.\n", + dev_dbg(chan->dev, "link descriptor %p will be recycle.\n", desc); - dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); + dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); /* Run the link descriptor callback function */ if (callback) { - spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); - dev_dbg(fsl_chan->dev, "link descriptor %p callback\n", + spin_unlock_irqrestore(&chan->desc_lock, flags); + dev_dbg(chan->dev, "link descriptor %p callback\n", desc); callback(callback_param); - spin_lock_irqsave(&fsl_chan->desc_lock, flags); + spin_lock_irqsave(&chan->desc_lock, flags); } } - spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); + spin_unlock_irqrestore(&chan->desc_lock, flags); } /** * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue. - * @fsl_chan : Freescale DMA channel + * @chan : Freescale DMA channel */ -static void fsl_chan_xfer_ld_queue(struct fsldma_chan *fsl_chan) +static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) { struct list_head *ld_node; dma_addr_t next_dst_addr; unsigned long flags; - spin_lock_irqsave(&fsl_chan->desc_lock, flags); + spin_lock_irqsave(&chan->desc_lock, flags); - if (!dma_is_idle(fsl_chan)) + if (!dma_is_idle(chan)) goto out_unlock; - dma_halt(fsl_chan); + dma_halt(chan); /* If there are some link descriptors * not transfered in queue. We need to start it. 
*/ /* Find the first un-transfer desciptor */ - for (ld_node = fsl_chan->ld_queue.next; - (ld_node != &fsl_chan->ld_queue) + for (ld_node = chan->ld_queue.next; + (ld_node != &chan->ld_queue) && (dma_async_is_complete( to_fsl_desc(ld_node)->async_tx.cookie, - fsl_chan->completed_cookie, - fsl_chan->common.cookie) == DMA_SUCCESS); + chan->completed_cookie, + chan->common.cookie) == DMA_SUCCESS); ld_node = ld_node->next); - if (ld_node != &fsl_chan->ld_queue) { + if (ld_node != &chan->ld_queue) { /* Get the ld start address from ld_queue */ next_dst_addr = to_fsl_desc(ld_node)->async_tx.phys; - dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%llx\n", + dev_dbg(chan->dev, "xfer LDs staring from 0x%llx\n", (unsigned long long)next_dst_addr); - set_cdar(fsl_chan, next_dst_addr); - dma_start(fsl_chan); + set_cdar(chan, next_dst_addr); + dma_start(chan); } else { - set_cdar(fsl_chan, 0); - set_ndar(fsl_chan, 0); + set_cdar(chan, 0); + set_ndar(chan, 0); } out_unlock: - spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); + spin_unlock_irqrestore(&chan->desc_lock, flags); } /** * fsl_dma_memcpy_issue_pending - Issue the DMA start command - * @fsl_chan : Freescale DMA channel + * @chan : Freescale DMA channel */ -static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan) +static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan) { - struct fsldma_chan *fsl_chan = to_fsl_chan(chan); + struct fsldma_chan *chan = to_fsl_chan(dchan); #ifdef FSL_DMA_LD_DEBUG struct fsl_desc_sw *ld; unsigned long flags; - spin_lock_irqsave(&fsl_chan->desc_lock, flags); - if (list_empty(&fsl_chan->ld_queue)) { - spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); + spin_lock_irqsave(&chan->desc_lock, flags); + if (list_empty(&chan->ld_queue)) { + spin_unlock_irqrestore(&chan->desc_lock, flags); return; } - dev_dbg(fsl_chan->dev, "--memcpy issue--\n"); - list_for_each_entry(ld, &fsl_chan->ld_queue, node) { + dev_dbg(chan->dev, "--memcpy issue--\n"); + list_for_each_entry(ld, &chan->ld_queue, node) { int i; - dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n", - fsl_chan->id, ld->async_tx.phys); + dev_dbg(chan->dev, "Ch %d, LD %08x\n", + chan->id, ld->async_tx.phys); for (i = 0; i < 8; i++) - dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n", + dev_dbg(chan->dev, "LD offset %d: %08x\n", i, *(((u32 *)&ld->hw) + i)); } - dev_dbg(fsl_chan->dev, "----------------\n"); - spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); + dev_dbg(chan->dev, "----------------\n"); + spin_unlock_irqrestore(&chan->desc_lock, flags); #endif - fsl_chan_xfer_ld_queue(fsl_chan); + fsl_chan_xfer_ld_queue(chan); } /** * fsl_dma_is_complete - Determine the DMA status - * @fsl_chan : Freescale DMA channel + * @chan : Freescale DMA channel */ -static enum dma_status fsl_dma_is_complete(struct dma_chan *chan, +static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan, dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used) { - struct fsldma_chan *fsl_chan = to_fsl_chan(chan); + struct fsldma_chan *chan = to_fsl_chan(dchan); dma_cookie_t last_used; dma_cookie_t last_complete; - fsl_chan_ld_cleanup(fsl_chan); + fsl_chan_ld_cleanup(chan); - last_used = chan->cookie; - last_complete = fsl_chan->completed_cookie; + last_used = dchan->cookie; + last_complete = chan->completed_cookie; if (done) *done = last_complete; @@ -973,30 +973,30 @@ static enum dma_status fsl_dma_is_complete(struct dma_chan *chan, static irqreturn_t fsldma_chan_irq(int irq, void *data) { - struct fsldma_chan *fsl_chan = data; - u32 stat; + struct fsldma_chan *chan = data; int 
update_cookie = 0; int xfer_ld_q = 0; + u32 stat; - stat = get_sr(fsl_chan); - dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n", - fsl_chan->id, stat); - set_sr(fsl_chan, stat); /* Clear the event register */ + stat = get_sr(chan); + dev_dbg(chan->dev, "event: channel %d, stat = 0x%x\n", + chan->id, stat); + set_sr(chan, stat); /* Clear the event register */ stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); if (!stat) return IRQ_NONE; if (stat & FSL_DMA_SR_TE) - dev_err(fsl_chan->dev, "Transfer Error!\n"); + dev_err(chan->dev, "Transfer Error!\n"); /* Programming Error * The DMA_INTERRUPT async_tx is a NULL transfer, which will * triger a PE interrupt. */ if (stat & FSL_DMA_SR_PE) { - dev_dbg(fsl_chan->dev, "event: Programming Error INT\n"); - if (get_bcr(fsl_chan) == 0) { + dev_dbg(chan->dev, "event: Programming Error INT\n"); + if (get_bcr(chan) == 0) { /* BCR register is 0, this is a DMA_INTERRUPT async_tx. * Now, update the completed cookie, and continue the * next uncompleted transfer. @@ -1011,10 +1011,10 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) * we will recycle the used descriptor. */ if (stat & FSL_DMA_SR_EOSI) { - dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n"); - dev_dbg(fsl_chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n", - (unsigned long long)get_cdar(fsl_chan), - (unsigned long long)get_ndar(fsl_chan)); + dev_dbg(chan->dev, "event: End-of-segments INT\n"); + dev_dbg(chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n", + (unsigned long long)get_cdar(chan), + (unsigned long long)get_ndar(chan)); stat &= ~FSL_DMA_SR_EOSI; update_cookie = 1; } @@ -1023,7 +1023,7 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) * and start the next transfer if it exist. */ if (stat & FSL_DMA_SR_EOCDI) { - dev_dbg(fsl_chan->dev, "event: End-of-Chain link INT\n"); + dev_dbg(chan->dev, "event: End-of-Chain link INT\n"); stat &= ~FSL_DMA_SR_EOCDI; update_cookie = 1; xfer_ld_q = 1; @@ -1034,28 +1034,28 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) * prepare next transfer. 
*/ if (stat & FSL_DMA_SR_EOLNI) { - dev_dbg(fsl_chan->dev, "event: End-of-link INT\n"); + dev_dbg(chan->dev, "event: End-of-link INT\n"); stat &= ~FSL_DMA_SR_EOLNI; xfer_ld_q = 1; } if (update_cookie) - fsl_dma_update_completed_cookie(fsl_chan); + fsl_dma_update_completed_cookie(chan); if (xfer_ld_q) - fsl_chan_xfer_ld_queue(fsl_chan); + fsl_chan_xfer_ld_queue(chan); if (stat) - dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n", + dev_dbg(chan->dev, "event: unhandled sr 0x%02x\n", stat); - dev_dbg(fsl_chan->dev, "event: Exit\n"); - tasklet_schedule(&fsl_chan->tasklet); + dev_dbg(chan->dev, "event: Exit\n"); + tasklet_schedule(&chan->tasklet); return IRQ_HANDLED; } static void dma_do_tasklet(unsigned long data) { - struct fsldma_chan *fsl_chan = (struct fsldma_chan *)data; - fsl_chan_ld_cleanup(fsl_chan); + struct fsldma_chan *chan = (struct fsldma_chan *)data; + fsl_chan_ld_cleanup(chan); } static irqreturn_t fsldma_ctrl_irq(int irq, void *data) @@ -1171,24 +1171,24 @@ out_unwind: static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev, struct device_node *node, u32 feature, const char *compatible) { - struct fsldma_chan *fchan; + struct fsldma_chan *chan; struct resource res; int err; /* alloc channel */ - fchan = kzalloc(sizeof(*fchan), GFP_KERNEL); - if (!fchan) { + chan = kzalloc(sizeof(*chan), GFP_KERNEL); + if (!chan) { dev_err(fdev->dev, "no free memory for DMA channels!\n"); err = -ENOMEM; goto out_return; } /* ioremap registers for use */ - fchan->regs = of_iomap(node, 0); - if (!fchan->regs) { + chan->regs = of_iomap(node, 0); + if (!chan->regs) { dev_err(fdev->dev, "unable to ioremap registers\n"); err = -ENOMEM; - goto out_free_fchan; + goto out_free_chan; } err = of_address_to_resource(node, 0, &res); @@ -1197,74 +1197,74 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev, goto out_iounmap_regs; } - fchan->feature = feature; + chan->feature = feature; if (!fdev->feature) - fdev->feature = fchan->feature; + fdev->feature = chan->feature; /* * If the DMA device's feature is different than the feature * of its channels, report the bug */ - WARN_ON(fdev->feature != fchan->feature); + WARN_ON(fdev->feature != chan->feature); - fchan->dev = fdev->dev; - fchan->id = ((res.start - 0x100) & 0xfff) >> 7; - if (fchan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { + chan->dev = fdev->dev; + chan->id = ((res.start - 0x100) & 0xfff) >> 7; + if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { dev_err(fdev->dev, "too many channels for device\n"); err = -EINVAL; goto out_iounmap_regs; } - fdev->chan[fchan->id] = fchan; - tasklet_init(&fchan->tasklet, dma_do_tasklet, (unsigned long)fchan); + fdev->chan[chan->id] = chan; + tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan); /* Initialize the channel */ - dma_init(fchan); + dma_init(chan); /* Clear cdar registers */ - set_cdar(fchan, 0); + set_cdar(chan, 0); - switch (fchan->feature & FSL_DMA_IP_MASK) { + switch (chan->feature & FSL_DMA_IP_MASK) { case FSL_DMA_IP_85XX: - fchan->toggle_ext_pause = fsl_chan_toggle_ext_pause; + chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; case FSL_DMA_IP_83XX: - fchan->toggle_ext_start = fsl_chan_toggle_ext_start; - fchan->set_src_loop_size = fsl_chan_set_src_loop_size; - fchan->set_dst_loop_size = fsl_chan_set_dst_loop_size; - fchan->set_request_count = fsl_chan_set_request_count; + chan->toggle_ext_start = fsl_chan_toggle_ext_start; + chan->set_src_loop_size = fsl_chan_set_src_loop_size; + chan->set_dst_loop_size = fsl_chan_set_dst_loop_size; + chan->set_request_count = 
fsl_chan_set_request_count; } - spin_lock_init(&fchan->desc_lock); - INIT_LIST_HEAD(&fchan->ld_queue); + spin_lock_init(&chan->desc_lock); + INIT_LIST_HEAD(&chan->ld_queue); - fchan->common.device = &fdev->common; + chan->common.device = &fdev->common; /* find the IRQ line, if it exists in the device tree */ - fchan->irq = irq_of_parse_and_map(node, 0); + chan->irq = irq_of_parse_and_map(node, 0); /* Add the channel to DMA device channel list */ - list_add_tail(&fchan->common.device_node, &fdev->common.channels); + list_add_tail(&chan->common.device_node, &fdev->common.channels); fdev->common.chancnt++; - dev_info(fdev->dev, "#%d (%s), irq %d\n", fchan->id, compatible, - fchan->irq != NO_IRQ ? fchan->irq : fdev->irq); + dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible, + chan->irq != NO_IRQ ? chan->irq : fdev->irq); return 0; out_iounmap_regs: - iounmap(fchan->regs); -out_free_fchan: - kfree(fchan); + iounmap(chan->regs); +out_free_chan: + kfree(chan); out_return: return err; } -static void fsl_dma_chan_remove(struct fsldma_chan *fchan) +static void fsl_dma_chan_remove(struct fsldma_chan *chan) { - irq_dispose_mapping(fchan->irq); - list_del(&fchan->common.device_node); - iounmap(fchan->regs); - kfree(fchan); + irq_dispose_mapping(chan->irq); + list_del(&chan->common.device_node); + iounmap(chan->regs); + kfree(chan); } static int __devinit fsldma_of_probe(struct of_device *op, -- cgit v1.2.3-70-g09d2 From 9c3a50b7d7ec45da34e73cac66cde12dd6092dd8 Mon Sep 17 00:00:00 2001 From: Ira Snyder Date: Wed, 6 Jan 2010 13:34:06 +0000 Subject: fsldma: major cleanups and fixes Fix locking. Use two queues in the driver, one for pending transactions, and one for transactions which are actually running on the hardware. Call dma_run_dependencies() on descriptor cleanup so that the async_tx API works correctly. There are a number of places throughout the code where lists of descriptors are freed in a loop. Create functions to handle this, and use them instead of open-coding the loop each time. Signed-off-by: Ira W.
Snyder Signed-off-by: Dan Williams --- drivers/dma/fsldma.c | 386 +++++++++++++++++++++++++++------------------------ drivers/dma/fsldma.h | 3 +- 2 files changed, 207 insertions(+), 182 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 7b5f88cb495..19011c20390 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -61,7 +61,6 @@ static void dma_init(struct fsldma_chan *chan) | FSL_DMA_MR_PRC_RM, 32); break; } - } static void set_sr(struct fsldma_chan *chan, u32 val) @@ -120,11 +119,6 @@ static dma_addr_t get_cdar(struct fsldma_chan *chan) return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN; } -static void set_ndar(struct fsldma_chan *chan, dma_addr_t addr) -{ - DMA_OUT(chan, &chan->regs->ndar, addr, 64); -} - static dma_addr_t get_ndar(struct fsldma_chan *chan) { return DMA_IN(chan, &chan->regs->ndar, 64); @@ -178,11 +172,12 @@ static void dma_halt(struct fsldma_chan *chan) for (i = 0; i < 100; i++) { if (dma_is_idle(chan)) - break; + return; + udelay(10); } - if (i >= 100 && !dma_is_idle(chan)) + if (!dma_is_idle(chan)) dev_err(chan->dev, "DMA halt timeout!\n"); } @@ -199,27 +194,6 @@ static void set_ld_eol(struct fsldma_chan *chan, | snoop_bits, 64); } -static void append_ld_queue(struct fsldma_chan *chan, - struct fsl_desc_sw *new_desc) -{ - struct fsl_desc_sw *queue_tail = to_fsl_desc(chan->ld_queue.prev); - - if (list_empty(&chan->ld_queue)) - return; - - /* Link to the new descriptor physical address and - * Enable End-of-segment interrupt for - * the last link descriptor. - * (the previous node's next link descriptor) - * - * For FSL_DMA_IP_83xx, the snoop enable bit need be set. - */ - queue_tail->hw.next_ln_addr = CPU_TO_DMA(chan, - new_desc->async_tx.phys | FSL_DMA_EOSIE | - (((chan->feature & FSL_DMA_IP_MASK) - == FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64); -} - /** * fsl_chan_set_src_loop_size - Set source address hold transfer size * @chan : Freescale DMA channel @@ -343,6 +317,31 @@ static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable) chan->feature &= ~FSL_DMA_CHAN_START_EXT; } +static void append_ld_queue(struct fsldma_chan *chan, + struct fsl_desc_sw *desc) +{ + struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev); + + if (list_empty(&chan->ld_pending)) + goto out_splice; + + /* + * Add the hardware descriptor to the chain of hardware descriptors + * that already exists in memory. + * + * This will un-set the EOL bit of the existing transaction, and the + * last link in this transaction will become the EOL descriptor. 
+ */ + set_desc_next(chan, &tail->hw, desc->async_tx.phys); + + /* + * Add the software descriptor and all children to the list + * of pending transactions + */ +out_splice: + list_splice_tail_init(&desc->tx_list, &chan->ld_pending); +} + static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) { struct fsldma_chan *chan = to_fsl_chan(tx->chan); @@ -351,9 +350,12 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) unsigned long flags; dma_cookie_t cookie; - /* cookie increment and adding to ld_queue must be atomic */ spin_lock_irqsave(&chan->desc_lock, flags); + /* + * assign cookies to all of the software descriptors + * that make up this transaction + */ cookie = chan->common.cookie; list_for_each_entry(child, &desc->tx_list, node) { cookie++; @@ -364,8 +366,9 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) } chan->common.cookie = cookie; + + /* put this transaction onto the tail of the pending queue */ append_ld_queue(chan, desc); - list_splice_init(&desc->tx_list, chan->ld_queue.prev); spin_unlock_irqrestore(&chan->desc_lock, flags); @@ -381,20 +384,22 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) static struct fsl_desc_sw *fsl_dma_alloc_descriptor( struct fsldma_chan *chan) { + struct fsl_desc_sw *desc; dma_addr_t pdesc; - struct fsl_desc_sw *desc_sw; - - desc_sw = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); - if (desc_sw) { - memset(desc_sw, 0, sizeof(struct fsl_desc_sw)); - INIT_LIST_HEAD(&desc_sw->tx_list); - dma_async_tx_descriptor_init(&desc_sw->async_tx, - &chan->common); - desc_sw->async_tx.tx_submit = fsl_dma_tx_submit; - desc_sw->async_tx.phys = pdesc; + + desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); + if (!desc) { + dev_dbg(chan->dev, "out of memory for link desc\n"); + return NULL; } - return desc_sw; + memset(desc, 0, sizeof(*desc)); + INIT_LIST_HEAD(&desc->tx_list); + dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); + desc->async_tx.tx_submit = fsl_dma_tx_submit; + desc->async_tx.phys = pdesc; + + return desc; } @@ -414,21 +419,53 @@ static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan) if (chan->desc_pool) return 1; - /* We need the descriptor to be aligned to 32bytes + /* + * We need the descriptor to be aligned to 32bytes * for meeting FSL DMA specification requirement. 
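A note on the alignment argument in the dma_pool_create() call that follows: the pool guarantees each allocation is aligned to the "align" parameter, so replacing the hard-coded 32 with __alignof__(struct fsl_desc_sw) states the same hardware requirement in terms of the type itself. A hedged compile-time check of that equivalence (assuming the embedded hardware descriptor is declared with aligned(32), as in fsldma.h):

	/* sketch only: holds as long as struct fsl_desc_sw carries aligned(32) */
	BUILD_BUG_ON(__alignof__(struct fsl_desc_sw) < 32);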
*/ chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", - chan->dev, sizeof(struct fsl_desc_sw), - 32, 0); + chan->dev, + sizeof(struct fsl_desc_sw), + __alignof__(struct fsl_desc_sw), 0); if (!chan->desc_pool) { - dev_err(chan->dev, "No memory for channel %d " - "descriptor dma pool.\n", chan->id); - return 0; + dev_err(chan->dev, "unable to allocate channel %d " + "descriptor pool\n", chan->id); + return -ENOMEM; } + /* there is at least one descriptor free to be allocated */ return 1; } +/** + * fsldma_free_desc_list - Free all descriptors in a queue + * @chan: Freescae DMA channel + * @list: the list to free + * + * LOCKING: must hold chan->desc_lock + */ +static void fsldma_free_desc_list(struct fsldma_chan *chan, + struct list_head *list) +{ + struct fsl_desc_sw *desc, *_desc; + + list_for_each_entry_safe(desc, _desc, list, node) { + list_del(&desc->node); + dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); + } +} + +static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan, + struct list_head *list) +{ + struct fsl_desc_sw *desc, *_desc; + + list_for_each_entry_safe_reverse(desc, _desc, list, node) { + list_del(&desc->node); + dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); + } +} + /** * fsl_dma_free_chan_resources - Free all resources of the channel. * @chan : Freescale DMA channel @@ -436,23 +473,15 @@ static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan) static void fsl_dma_free_chan_resources(struct dma_chan *dchan) { struct fsldma_chan *chan = to_fsl_chan(dchan); - struct fsl_desc_sw *desc, *_desc; unsigned long flags; dev_dbg(chan->dev, "Free all channel resources.\n"); spin_lock_irqsave(&chan->desc_lock, flags); - list_for_each_entry_safe(desc, _desc, &chan->ld_queue, node) { -#ifdef FSL_DMA_LD_DEBUG - dev_dbg(chan->dev, - "LD %p will be released.\n", desc); -#endif - list_del(&desc->node); - /* free link descriptor */ - dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); - } + fsldma_free_desc_list(chan, &chan->ld_pending); + fsldma_free_desc_list(chan, &chan->ld_running); spin_unlock_irqrestore(&chan->desc_lock, flags); - dma_pool_destroy(chan->desc_pool); + dma_pool_destroy(chan->desc_pool); chan->desc_pool = NULL; } @@ -491,7 +520,6 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( { struct fsldma_chan *chan; struct fsl_desc_sw *first = NULL, *prev = NULL, *new; - struct list_head *list; size_t copy; if (!dchan) @@ -550,12 +578,7 @@ fail: if (!first) return NULL; - list = &first->tx_list; - list_for_each_entry_safe_reverse(new, prev, list, node) { - list_del(&new->node); - dma_pool_free(chan->desc_pool, new, new->async_tx.phys); - } - + fsldma_free_desc_list_reverse(chan, &first->tx_list); return NULL; } @@ -578,7 +601,6 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( struct fsldma_chan *chan; struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL; struct fsl_dma_slave *slave; - struct list_head *tx_list; size_t copy; int i; @@ -748,19 +770,13 @@ fail: * * We're re-using variables for the loop, oh well */ - tx_list = &first->tx_list; - list_for_each_entry_safe_reverse(new, prev, tx_list, node) { - list_del_init(&new->node); - dma_pool_free(chan->desc_pool, new, new->async_tx.phys); - } - + fsldma_free_desc_list_reverse(chan, &first->tx_list); return NULL; } static void fsl_dma_device_terminate_all(struct dma_chan *dchan) { struct fsldma_chan *chan; - struct fsl_desc_sw *desc, *tmp; unsigned long flags; if (!dchan) @@ -774,10 +790,8 @@ static void fsl_dma_device_terminate_all(struct 
dma_chan *dchan) spin_lock_irqsave(&chan->desc_lock, flags); /* Remove and free all of the descriptors in the LD queue */ - list_for_each_entry_safe(desc, tmp, &chan->ld_queue, node) { - list_del(&desc->node); - dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); - } + fsldma_free_desc_list(chan, &chan->ld_pending); + fsldma_free_desc_list(chan, &chan->ld_running); spin_unlock_irqrestore(&chan->desc_lock, flags); } @@ -785,31 +799,48 @@ static void fsl_dma_device_terminate_all(struct dma_chan *dchan) /** * fsl_dma_update_completed_cookie - Update the completed cookie. * @chan : Freescale DMA channel + * + * CONTEXT: hardirq */ static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan) { - struct fsl_desc_sw *cur_desc, *desc; - dma_addr_t ld_phy; - - ld_phy = get_cdar(chan) & FSL_DMA_NLDA_MASK; + struct fsl_desc_sw *desc; + unsigned long flags; + dma_cookie_t cookie; - if (ld_phy) { - cur_desc = NULL; - list_for_each_entry(desc, &chan->ld_queue, node) - if (desc->async_tx.phys == ld_phy) { - cur_desc = desc; - break; - } + spin_lock_irqsave(&chan->desc_lock, flags); - if (cur_desc && cur_desc->async_tx.cookie) { - if (dma_is_idle(chan)) - chan->completed_cookie = - cur_desc->async_tx.cookie; - else - chan->completed_cookie = - cur_desc->async_tx.cookie - 1; - } + if (list_empty(&chan->ld_running)) { + dev_dbg(chan->dev, "no running descriptors\n"); + goto out_unlock; } + + /* Get the last descriptor, update the cookie to that */ + desc = to_fsl_desc(chan->ld_running.prev); + if (dma_is_idle(chan)) + cookie = desc->async_tx.cookie; + else + cookie = desc->async_tx.cookie - 1; + + chan->completed_cookie = cookie; + +out_unlock: + spin_unlock_irqrestore(&chan->desc_lock, flags); +} + +/** + * fsldma_desc_status - Check the status of a descriptor + * @chan: Freescale DMA channel + * @desc: DMA SW descriptor + * + * This function will return the status of the given descriptor + */ +static enum dma_status fsldma_desc_status(struct fsldma_chan *chan, + struct fsl_desc_sw *desc) +{ + return dma_async_is_complete(desc->async_tx.cookie, + chan->completed_cookie, + chan->common.cookie); } /** @@ -817,8 +848,6 @@ static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan) * @chan : Freescale DMA channel * * This function clean up the ld_queue of DMA channel. - * If 'in_intr' is set, the function will move the link descriptor to - * the recycle list. Otherwise, free it directly. 
*/ static void fsl_chan_ld_cleanup(struct fsldma_chan *chan) { @@ -827,80 +856,95 @@ static void fsl_chan_ld_cleanup(struct fsldma_chan *chan) spin_lock_irqsave(&chan->desc_lock, flags); - dev_dbg(chan->dev, "chan completed_cookie = %d\n", - chan->completed_cookie); - list_for_each_entry_safe(desc, _desc, &chan->ld_queue, node) { + dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie); + list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) { dma_async_tx_callback callback; void *callback_param; - if (dma_async_is_complete(desc->async_tx.cookie, - chan->completed_cookie, chan->common.cookie) - == DMA_IN_PROGRESS) + if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS) break; - callback = desc->async_tx.callback; - callback_param = desc->async_tx.callback_param; - - /* Remove from ld_queue list */ + /* Remove from the list of running transactions */ list_del(&desc->node); - dev_dbg(chan->dev, "link descriptor %p will be recycle.\n", - desc); - dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); - /* Run the link descriptor callback function */ + callback = desc->async_tx.callback; + callback_param = desc->async_tx.callback_param; if (callback) { spin_unlock_irqrestore(&chan->desc_lock, flags); - dev_dbg(chan->dev, "link descriptor %p callback\n", - desc); + dev_dbg(chan->dev, "LD %p callback\n", desc); callback(callback_param); spin_lock_irqsave(&chan->desc_lock, flags); } + + /* Run any dependencies, then free the descriptor */ + dma_run_dependencies(&desc->async_tx); + dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); } + spin_unlock_irqrestore(&chan->desc_lock, flags); } /** - * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue. + * fsl_chan_xfer_ld_queue - transfer any pending transactions * @chan : Freescale DMA channel + * + * This will make sure that any pending transactions will be run. + * If the DMA controller is idle, it will be started. Otherwise, + * the DMA controller's interrupt handler will start any pending + * transactions when it becomes idle. */ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) { - struct list_head *ld_node; - dma_addr_t next_dst_addr; + struct fsl_desc_sw *desc; unsigned long flags; spin_lock_irqsave(&chan->desc_lock, flags); - if (!dma_is_idle(chan)) + /* + * If the list of pending descriptors is empty, then we + * don't need to do any work at all + */ + if (list_empty(&chan->ld_pending)) { + dev_dbg(chan->dev, "no pending LDs\n"); goto out_unlock; + } + /* + * The DMA controller is not idle, which means the interrupt + * handler will start any queued transactions when it runs + * at the end of the current transaction + */ + if (!dma_is_idle(chan)) { + dev_dbg(chan->dev, "DMA controller still busy\n"); + goto out_unlock; + } + + /* + * TODO: + * make sure the dma_halt() function really un-wedges the + * controller as much as possible + */ dma_halt(chan); - /* If there are some link descriptors - * not transfered in queue. We need to start it. 
+ /* + * If there are some link descriptors which have not been + * transferred, we need to start the controller */ - /* Find the first un-transfer desciptor */ - for (ld_node = chan->ld_queue.next; - (ld_node != &chan->ld_queue) - && (dma_async_is_complete( - to_fsl_desc(ld_node)->async_tx.cookie, - chan->completed_cookie, - chan->common.cookie) == DMA_SUCCESS); - ld_node = ld_node->next); - - if (ld_node != &chan->ld_queue) { - /* Get the ld start address from ld_queue */ - next_dst_addr = to_fsl_desc(ld_node)->async_tx.phys; - dev_dbg(chan->dev, "xfer LDs staring from 0x%llx\n", - (unsigned long long)next_dst_addr); - set_cdar(chan, next_dst_addr); - dma_start(chan); - } else { - set_cdar(chan, 0); - set_ndar(chan, 0); - } + /* + * Move all elements from the queue of pending transactions + * onto the list of running transactions + */ + desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node); + list_splice_tail_init(&chan->ld_pending, &chan->ld_running); + + /* + * Program the descriptor's address into the DMA controller, + * then start the DMA transaction + */ + set_cdar(chan, desc->async_tx.phys); + dma_start(chan); out_unlock: spin_unlock_irqrestore(&chan->desc_lock, flags); @@ -913,30 +957,6 @@ out_unlock: static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan) { struct fsldma_chan *chan = to_fsl_chan(dchan); - -#ifdef FSL_DMA_LD_DEBUG - struct fsl_desc_sw *ld; - unsigned long flags; - - spin_lock_irqsave(&chan->desc_lock, flags); - if (list_empty(&chan->ld_queue)) { - spin_unlock_irqrestore(&chan->desc_lock, flags); - return; - } - - dev_dbg(chan->dev, "--memcpy issue--\n"); - list_for_each_entry(ld, &chan->ld_queue, node) { - int i; - dev_dbg(chan->dev, "Ch %d, LD %08x\n", - chan->id, ld->async_tx.phys); - for (i = 0; i < 8; i++) - dev_dbg(chan->dev, "LD offset %d: %08x\n", - i, *(((u32 *)&ld->hw) + i)); - } - dev_dbg(chan->dev, "----------------\n"); - spin_unlock_irqrestore(&chan->desc_lock, flags); -#endif - fsl_chan_xfer_ld_queue(chan); } @@ -978,10 +998,10 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) int xfer_ld_q = 0; u32 stat; + /* save and clear the status register */ stat = get_sr(chan); - dev_dbg(chan->dev, "event: channel %d, stat = 0x%x\n", - chan->id, stat); - set_sr(chan, stat); /* Clear the event register */ + set_sr(chan, stat); + dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat); stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); if (!stat) @@ -990,12 +1010,13 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) if (stat & FSL_DMA_SR_TE) dev_err(chan->dev, "Transfer Error!\n"); - /* Programming Error + /* + * Programming Error * The DMA_INTERRUPT async_tx is a NULL transfer, which will * triger a PE interrupt. */ if (stat & FSL_DMA_SR_PE) { - dev_dbg(chan->dev, "event: Programming Error INT\n"); + dev_dbg(chan->dev, "irq: Programming Error INT\n"); if (get_bcr(chan) == 0) { /* BCR register is 0, this is a DMA_INTERRUPT async_tx. * Now, update the completed cookie, and continue the @@ -1007,34 +1028,37 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) stat &= ~FSL_DMA_SR_PE; } - /* If the link descriptor segment transfer finishes, + /* + * If the link descriptor segment transfer finishes, * we will recycle the used descriptor. 
*/ if (stat & FSL_DMA_SR_EOSI) { - dev_dbg(chan->dev, "event: End-of-segments INT\n"); - dev_dbg(chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n", + dev_dbg(chan->dev, "irq: End-of-segments INT\n"); + dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n", (unsigned long long)get_cdar(chan), (unsigned long long)get_ndar(chan)); stat &= ~FSL_DMA_SR_EOSI; update_cookie = 1; } - /* For MPC8349, EOCDI event need to update cookie + /* + * For MPC8349, EOCDI event need to update cookie * and start the next transfer if it exist. */ if (stat & FSL_DMA_SR_EOCDI) { - dev_dbg(chan->dev, "event: End-of-Chain link INT\n"); + dev_dbg(chan->dev, "irq: End-of-Chain link INT\n"); stat &= ~FSL_DMA_SR_EOCDI; update_cookie = 1; xfer_ld_q = 1; } - /* If it current transfer is the end-of-transfer, + /* + * If it current transfer is the end-of-transfer, * we should clear the Channel Start bit for * prepare next transfer. */ if (stat & FSL_DMA_SR_EOLNI) { - dev_dbg(chan->dev, "event: End-of-link INT\n"); + dev_dbg(chan->dev, "irq: End-of-link INT\n"); stat &= ~FSL_DMA_SR_EOLNI; xfer_ld_q = 1; } @@ -1044,10 +1068,9 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) if (xfer_ld_q) fsl_chan_xfer_ld_queue(chan); if (stat) - dev_dbg(chan->dev, "event: unhandled sr 0x%02x\n", - stat); + dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat); - dev_dbg(chan->dev, "event: Exit\n"); + dev_dbg(chan->dev, "irq: Exit\n"); tasklet_schedule(&chan->tasklet); return IRQ_HANDLED; } @@ -1235,7 +1258,8 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev, } spin_lock_init(&chan->desc_lock); - INIT_LIST_HEAD(&chan->ld_queue); + INIT_LIST_HEAD(&chan->ld_pending); + INIT_LIST_HEAD(&chan->ld_running); chan->common.device = &fdev->common; diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index ea3b19c8708..cb4d6ff5159 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -131,7 +131,8 @@ struct fsldma_chan { struct fsldma_chan_regs __iomem *regs; dma_cookie_t completed_cookie; /* The maximum cookie completed */ spinlock_t desc_lock; /* Descriptor operation lock */ - struct list_head ld_queue; /* Link descriptors queue */ + struct list_head ld_pending; /* Link descriptors queue */ + struct list_head ld_running; /* Link descriptors queue */ struct dma_chan common; /* DMA common channel */ struct dma_pool *desc_pool; /* Descriptors pool */ struct device *dev; /* Channel device */ -- cgit v1.2.3-70-g09d2 From 4b1cf1facca31b7db2a61d8aa2ba40d5a93a0957 Mon Sep 17 00:00:00 2001 From: Márton Németh Date: Tue, 2 Feb 2010 23:41:06 -0700 Subject: dma: make Open Firmware device id constant MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The match_table field of the struct of_device_id is constant, so it is worth making the initialization data also constant. The semantic match that finds this kind of pattern is as follows: (http://coccinelle.lip6.fr/) // @r@ disable decl_init,const_decl_init; identifier I1, I2, x; @@ struct I1 { ... const struct I2 *x; ... }; @s@ identifier r.I1, y; identifier r.x, E; @@ struct I1 y = { .x = E, }; @c@ identifier r.I2; identifier s.E; @@ const struct I2 E[] = ...
@depends on !c@
identifier r.I2;
identifier s.E;
@@
+ const struct I2 E[] = ...;
//

Signed-off-by: Márton Németh
Cc: Julia Lawall
Cc: cocci@diku.dk
[dan.j.williams@intel.com: resolved conflict with recent fsldma updates]
Signed-off-by: Dan Williams
---
 drivers/dma/fsldma.c      | 2 +-
 drivers/dma/ppc4xx/adma.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
(limited to 'drivers/dma')

diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 19011c20390..92efa87258b 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1398,7 +1398,7 @@ static int fsldma_of_remove(struct of_device *op)
 	return 0;
 }

-static struct of_device_id fsldma_of_ids[] = {
+static const struct of_device_id fsldma_of_ids[] = {
 	{ .compatible = "fsl,eloplus-dma", },
 	{ .compatible = "fsl,elo-dma", },
 	{}
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 0a3478e910f..e69d87f24a2 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4940,7 +4940,7 @@ out_free:
 	return ret;
 }

-static struct of_device_id __devinitdata ppc440spe_adma_of_match[] = {
+static const struct of_device_id ppc440spe_adma_of_match[] __devinitconst = {
 	{ .compatible = "ibm,dma-440spe", },
 	{ .compatible = "amcc,xor-accelerator", },
 	{},
-- 
cgit v1.2.3-70-g09d2

From 9ad7bd2944bd979ef4877cd439719be44c5f3b47 Mon Sep 17 00:00:00 2001
From: Roel Kluin
Date: Wed, 20 Jan 2010 01:25:56 +0100
Subject: dma: cases IPU_PIX_FMT_BGRA32, BGR32 and ABGR32 are the same in ipu_ch_param_set_size()

In these cases the same statements are executed.

Signed-off-by: Roel Kluin
Acked-by: Guennadi Liakhovetski
Signed-off-by: Andrew Morton
Signed-off-by: Dan Williams
---
 drivers/dma/ipu/ipu_idmac.c | 15 +--------------
 1 file changed, 1 insertion(+), 14 deletions(-)
(limited to 'drivers/dma')

diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 9a5bc1a7389..1c518f1cc49 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -348,6 +348,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
 		break;
 	case IPU_PIX_FMT_BGRA32:
 	case IPU_PIX_FMT_BGR32:
+	case IPU_PIX_FMT_ABGR32:
 		params->ip.bpp = 0;
 		params->ip.pfs = 4;
 		params->ip.npb = 7;
@@ -376,20 +377,6 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
 		params->ip.wid2 = 7;	/* Blue bit width - 1 */
 		params->ip.wid3 = 7;	/* Alpha bit width - 1 */
 		break;
-	case IPU_PIX_FMT_ABGR32:
-		params->ip.bpp = 0;
-		params->ip.pfs = 4;
-		params->ip.npb = 7;
-		params->ip.sat = 2;	/* SAT = 32-bit access */
-		params->ip.ofs0 = 8;	/* Red bit offset */
-		params->ip.ofs1 = 16;	/* Green bit offset */
-		params->ip.ofs2 = 24;	/* Blue bit offset */
-		params->ip.ofs3 = 0;	/* Alpha bit offset */
-		params->ip.wid0 = 7;	/* Red bit width - 1 */
-		params->ip.wid1 = 7;	/* Green bit width - 1 */
-		params->ip.wid2 = 7;	/* Blue bit width - 1 */
-		params->ip.wid3 = 7;	/* Alpha bit width - 1 */
-		break;
 	case IPU_PIX_FMT_UYVY:
 		params->ip.bpp = 2;
 		params->ip.pfs = 6;
-- 
cgit v1.2.3-70-g09d2

From fc4618575f79eea062cdc51715040e40cd35b71c Mon Sep 17 00:00:00 2001
From: Guennadi Liakhovetski
Date: Tue, 19 Jan 2010 07:24:55 +0000
Subject: sh: prepare the DMA driver for slave functionality

Slave DMA functionality uses scatter-gather arrays for data transfers,
whereas memcpy just uses a single data buffer. This patch converts the
current memcpy implementation in shdma.c to use scatter-gather, making it
just a special case with one SG-element.
This allows us to isolate descriptor list manipulations and locking into one function, thus reducing error chances. Signed-off-by: Guennadi Liakhovetski Acked-by: Dan Williams Signed-off-by: Paul Mundt --- drivers/dma/shdma.c | 221 ++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 153 insertions(+), 68 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index d10cc899c46..427c3effc43 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c @@ -53,12 +53,12 @@ static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); #define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id]) static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) { - ctrl_outl(data, (SH_DMAC_CHAN_BASE(sh_dc->id) + reg)); + ctrl_outl(data, SH_DMAC_CHAN_BASE(sh_dc->id) + reg); } static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) { - return ctrl_inl((SH_DMAC_CHAN_BASE(sh_dc->id) + reg)); + return ctrl_inl(SH_DMAC_CHAN_BASE(sh_dc->id) + reg); } static void dmae_init(struct sh_dmae_chan *sh_chan) @@ -95,14 +95,14 @@ static int sh_dmae_rst(int id) return 0; } -static int dmae_is_busy(struct sh_dmae_chan *sh_chan) +static bool dmae_is_busy(struct sh_dmae_chan *sh_chan) { u32 chcr = sh_dmae_readl(sh_chan, CHCR); - if (chcr & CHCR_DE) { - if (!(chcr & CHCR_TE)) - return -EBUSY; /* working */ - } - return 0; /* waiting */ + + if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE) + return true; /* working */ + + return false; /* waiting */ } static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan) @@ -136,10 +136,9 @@ static void dmae_halt(struct sh_dmae_chan *sh_chan) static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) { - int ret = dmae_is_busy(sh_chan); /* When DMA was working, can not set data to CHCR */ - if (ret) - return ret; + if (dmae_is_busy(sh_chan)) + return -EBUSY; sh_dmae_writel(sh_chan, val, CHCR); return 0; @@ -153,9 +152,9 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) { u32 addr; int shift = 0; - int ret = dmae_is_busy(sh_chan); - if (ret) - return ret; + + if (dmae_is_busy(sh_chan)) + return -EBUSY; if (sh_chan->id & DMARS_CHAN_MSK) shift = DMARS_SHIFT; @@ -301,23 +300,95 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan) kfree(desc); } -static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy( - struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, - size_t len, unsigned long flags) +/* + * sh_dmae_add_desc - get, set up and return one transfer descriptor + * @sh_chan: DMA channel + * @flags: DMA transfer flags + * @dest: destination DMA address, incremented when direction equals + * DMA_FROM_DEVICE or DMA_BIDIRECTIONAL + * @src: source DMA address, incremented when direction equals + * DMA_TO_DEVICE or DMA_BIDIRECTIONAL + * @len: DMA transfer length + * @first: if NULL, set to the current descriptor and cookie set to -EBUSY + * @direction: needed for slave DMA to decide which address to keep constant, + * equals DMA_BIDIRECTIONAL for MEMCPY + * Returns 0 or an error + * Locks: called with desc_lock held + */ +static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan, + unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len, + struct sh_desc **first, enum dma_data_direction direction) { - struct sh_dmae_chan *sh_chan; - struct sh_desc *first = NULL, *prev = NULL, *new; + struct sh_desc *new; size_t copy_size; - LIST_HEAD(tx_list); - int chunks = (len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1); - if (!chan) + if (!*len) return 
NULL; - if (!len) + /* Allocate the link descriptor from the free list */ + new = sh_dmae_get_desc(sh_chan); + if (!new) { + dev_err(sh_chan->dev, "No free link descriptor available\n"); return NULL; + } - sh_chan = to_sh_chan(chan); + copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1); + + new->hw.sar = *src; + new->hw.dar = *dest; + new->hw.tcr = copy_size; + + if (!*first) { + /* First desc */ + new->async_tx.cookie = -EBUSY; + *first = new; + } else { + /* Other desc - invisible to the user */ + new->async_tx.cookie = -EINVAL; + } + + dev_dbg(sh_chan->dev, "chaining (%u/%u)@%x -> %x with %p, cookie %d\n", + copy_size, *len, *src, *dest, &new->async_tx, + new->async_tx.cookie); + + new->mark = DESC_PREPARED; + new->async_tx.flags = flags; + + *len -= copy_size; + if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE) + *src += copy_size; + if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE) + *dest += copy_size; + + return new; +} + +/* + * sh_dmae_prep_sg - prepare transfer descriptors from an SG list + * + * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also + * converted to scatter-gather to guarantee consistent locking and a correct + * list manipulation. For slave DMA direction carries the usual meaning, and, + * logically, the SG list is RAM and the addr variable contains slave address, + * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL + * and the SG list contains only one element and points at the source buffer. + */ +static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan, + struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr, + enum dma_data_direction direction, unsigned long flags) +{ + struct scatterlist *sg; + struct sh_desc *first = NULL, *new = NULL /* compiler... 
*/;
+	LIST_HEAD(tx_list);
+	int chunks = 0;
+	int i;
+
+	if (!sg_len)
+		return NULL;
+
+	for_each_sg(sgl, sg, sg_len, i)
+		chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
+			(SH_DMA_TCR_MAX + 1);

 	/* Have to lock the whole loop to protect against concurrent release */
 	spin_lock_bh(&sh_chan->desc_lock);
@@ -333,49 +404,32 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
 	 * only during this function, then they are immediately spliced
 	 * back onto the free list in form of a chain
 	 */
-	do {
-		/* Allocate the link descriptor from the free list */
-		new = sh_dmae_get_desc(sh_chan);
-		if (!new) {
-			dev_err(sh_chan->dev,
-				"No free memory for link descriptor\n");
-			list_for_each_entry(new, &tx_list, node)
-				new->mark = DESC_IDLE;
-			list_splice(&tx_list, &sh_chan->ld_free);
-			spin_unlock_bh(&sh_chan->desc_lock);
-			return NULL;
-		}
-
-		copy_size = min(len, (size_t)SH_DMA_TCR_MAX + 1);
-
-		new->hw.sar = dma_src;
-		new->hw.dar = dma_dest;
-		new->hw.tcr = copy_size;
-		if (!first) {
-			/* First desc */
-			new->async_tx.cookie = -EBUSY;
-			first = new;
-		} else {
-			/* Other desc - invisible to the user */
-			new->async_tx.cookie = -EINVAL;
-		}
-
-		dev_dbg(sh_chan->dev,
-			"chaining %u of %u with %p, dst %x, cookie %d\n",
-			copy_size, len, &new->async_tx, dma_dest,
-			new->async_tx.cookie);
-
-		new->mark = DESC_PREPARED;
-		new->async_tx.flags = flags;
-		new->chunks = chunks--;
-
-		prev = new;
-		len -= copy_size;
-		dma_src += copy_size;
-		dma_dest += copy_size;
-		/* Insert the link descriptor to the LD ring */
-		list_add_tail(&new->node, &tx_list);
-	} while (len);
+	for_each_sg(sgl, sg, sg_len, i) {
+		dma_addr_t sg_addr = sg_dma_address(sg);
+		size_t len = sg_dma_len(sg);
+
+		if (!len)
+			goto err_get_desc;
+
+		do {
+			dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
+				i, sg, len, (unsigned long long)sg_addr);
+
+			if (direction == DMA_FROM_DEVICE)
+				new = sh_dmae_add_desc(sh_chan, flags,
+						&sg_addr, addr, &len, &first,
+						direction);
+			else
+				new = sh_dmae_add_desc(sh_chan, flags,
+						addr, &sg_addr, &len, &first,
+						direction);
+			if (!new)
+				goto err_get_desc;
+
+			new->chunks = chunks--;
+			list_add_tail(&new->node, &tx_list);
+		} while (len);
+	}

 	if (new != first)
 		new->async_tx.cookie = -ENOSPC;
@@ -386,6 +440,37 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
 	spin_unlock_bh(&sh_chan->desc_lock);

 	return &first->async_tx;
+
+err_get_desc:
+	list_for_each_entry(new, &tx_list, node)
+		new->mark = DESC_IDLE;
+	list_splice(&tx_list, &sh_chan->ld_free);
+
+	spin_unlock_bh(&sh_chan->desc_lock);
+
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
+	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
+	size_t len, unsigned long flags)
+{
+	struct sh_dmae_chan *sh_chan;
+	struct scatterlist sg;
+
+	if (!chan || !len)
+		return NULL;
+
+	sh_chan = to_sh_chan(chan);
+
+	sg_init_table(&sg, 1);
+	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
+		    offset_in_page(dma_src));
+	sg_dma_address(&sg) = dma_src;
+	sg_dma_len(&sg) = len;
+
+	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
+			       flags);
 }

 static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
@@ -559,7 +644,7 @@ static irqreturn_t sh_dmae_err(int irq, void *data)

 	/* IRQ Multi */
 	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
-		int cnt = 0;
+		int __maybe_unused cnt = 0;
 		switch (irq) {
 #if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
 		case DMTE6_IRQ:
-- 
cgit v1.2.3-70-g09d2
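The conversion above hinges on wrapping a flat memcpy buffer in a one-element
scatterlist, as the new sh_dmae_prep_memcpy() does. A minimal sketch of that
wrapping, assuming the dma_addr_t source is backed by a page-mapped buffer:

	struct scatterlist sg;

	sg_init_table(&sg, 1);			/* single-element list */
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;
	/* the common SG path now treats memcpy like any other transfer */

With this in place, one locking scheme and one descriptor-list walker serve
both the memcpy and the slave paths.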
From 623b4ac4bf9e767991c66e29b47dd4b19458fb42 Mon Sep 17 00:00:00 2001
From: Guennadi Liakhovetski
Date: Wed, 3 Feb 2010 14:44:12 +0000
Subject: sh: fix Transfer Size calculation in both DMA drivers

Both the original arch/sh/drivers/dma/dma-sh.c and the new SH dmaengine
drivers do not take into account bits 3:2 of the Transfer Size field in
the CHCR register; besides, the bit-field defines set bit 2, but the mask
only passes bits 1:0 through. The TS_16BLK and TS_32BLK macros are bogus
too. This patch fixes all these issues for sh7722 and sh7724; other CPUs
stay unchanged and might need to be fixed too.

Signed-off-by: Guennadi Liakhovetski
Acked-by: Dan Williams
Signed-off-by: Paul Mundt
---
 arch/sh/drivers/dma/dma-sh.c           |  5 +-
 arch/sh/include/asm/dma-sh.h           |  2 +-
 arch/sh/include/cpu-sh3/cpu/dma.h      | 20 ++++---
 arch/sh/include/cpu-sh4/cpu/dma-sh4a.h | 97 +++++++++++++++++++++++++---------
 arch/sh/include/cpu-sh4/cpu/dma.h      | 35 ++++++------
 drivers/dma/shdma.c                    |  6 ++-
 6 files changed, 114 insertions(+), 51 deletions(-)
(limited to 'drivers/dma')

diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c
index 37fb5b8bbc3..31830cb0af8 100644
--- a/arch/sh/drivers/dma/dma-sh.c
+++ b/arch/sh/drivers/dma/dma-sh.c
@@ -52,11 +52,14 @@ static inline unsigned int get_dmte_irq(unsigned int chan)
  *
  * iterations to complete the transfer.
  */
+static unsigned int ts_shift[] = TS_SHIFT;
 static inline unsigned int calc_xmit_shift(struct dma_channel *chan)
 {
 	u32 chcr = ctrl_inl(dma_base_addr[chan->chan] + CHCR);
+	int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) |
+		((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT);

-	return ts_shift[(chcr & CHCR_TS_MASK)>>CHCR_TS_SHIFT];
+	return ts_shift[cnt];
 }

 /*
diff --git a/arch/sh/include/asm/dma-sh.h b/arch/sh/include/asm/dma-sh.h
index 78eed3e0bdf..01d2fc72551 100644
--- a/arch/sh/include/asm/dma-sh.h
+++ b/arch/sh/include/asm/dma-sh.h
@@ -83,7 +83,7 @@ static int dmte_irq_map[] __maybe_unused = {
  * Define the default configuration for dual address memory-memory transfer.
  * The 0x400 value represents auto-request, external->external.
*/ -#define RS_DUAL (DM_INC | SM_INC | 0x400 | TS_32) +#define RS_DUAL (DM_INC | SM_INC | 0x400 | TS_INDEX2VAL(XMIT_SZ_32BIT)) /* DMA base address */ static u32 dma_base_addr[] __maybe_unused = { diff --git a/arch/sh/include/cpu-sh3/cpu/dma.h b/arch/sh/include/cpu-sh3/cpu/dma.h index 0ea15f3f236..207811a7a65 100644 --- a/arch/sh/include/cpu-sh3/cpu/dma.h +++ b/arch/sh/include/cpu-sh3/cpu/dma.h @@ -20,8 +20,10 @@ #define TS_32 0x00000010 #define TS_128 0x00000018 -#define CHCR_TS_MASK 0x18 -#define CHCR_TS_SHIFT 3 +#define CHCR_TS_LOW_MASK 0x18 +#define CHCR_TS_LOW_SHIFT 3 +#define CHCR_TS_HIGH_MASK 0 +#define CHCR_TS_HIGH_SHIFT 0 #define DMAOR_INIT DMAOR_DME @@ -36,11 +38,13 @@ enum { XMIT_SZ_128BIT, }; -static unsigned int ts_shift[] __maybe_unused = { - [XMIT_SZ_8BIT] = 0, - [XMIT_SZ_16BIT] = 1, - [XMIT_SZ_32BIT] = 2, - [XMIT_SZ_128BIT] = 4, -}; +#define TS_SHIFT { \ + [XMIT_SZ_8BIT] = 0, \ + [XMIT_SZ_16BIT] = 1, \ + [XMIT_SZ_32BIT] = 2, \ + [XMIT_SZ_128BIT] = 4, \ +} + +#define TS_INDEX2VAL(i) (((i) & 3) << CHCR_TS_LOW_SHIFT) #endif /* __ASM_CPU_SH3_DMA_H */ diff --git a/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h b/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h index c4ed660c14c..cc1cf3e8f16 100644 --- a/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h +++ b/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h @@ -2,13 +2,26 @@ #define __ASM_SH_CPU_SH4_DMA_SH7780_H #if defined(CONFIG_CPU_SUBTYPE_SH7343) || \ - defined(CONFIG_CPU_SUBTYPE_SH7722) || \ defined(CONFIG_CPU_SUBTYPE_SH7730) #define DMTE0_IRQ 48 #define DMTE4_IRQ 76 #define DMAE0_IRQ 78 /* DMA Error IRQ*/ #define SH_DMAC_BASE0 0xFE008020 #define SH_DMARS_BASE 0xFE009000 +#define CHCR_TS_LOW_MASK 0x00000018 +#define CHCR_TS_LOW_SHIFT 3 +#define CHCR_TS_HIGH_MASK 0 +#define CHCR_TS_HIGH_SHIFT 0 +#elif defined(CONFIG_CPU_SUBTYPE_SH7722) +#define DMTE0_IRQ 48 +#define DMTE4_IRQ 76 +#define DMAE0_IRQ 78 /* DMA Error IRQ*/ +#define SH_DMAC_BASE0 0xFE008020 +#define SH_DMARS_BASE 0xFE009000 +#define CHCR_TS_LOW_MASK 0x00000018 +#define CHCR_TS_LOW_SHIFT 3 +#define CHCR_TS_HIGH_MASK 0x00300000 +#define CHCR_TS_HIGH_SHIFT 20 #elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \ defined(CONFIG_CPU_SUBTYPE_SH7764) #define DMTE0_IRQ 34 @@ -16,8 +29,11 @@ #define DMAE0_IRQ 38 #define SH_DMAC_BASE0 0xFF608020 #define SH_DMARS_BASE 0xFF609000 -#elif defined(CONFIG_CPU_SUBTYPE_SH7723) || \ - defined(CONFIG_CPU_SUBTYPE_SH7724) +#define CHCR_TS_LOW_MASK 0x00000018 +#define CHCR_TS_LOW_SHIFT 3 +#define CHCR_TS_HIGH_MASK 0 +#define CHCR_TS_HIGH_SHIFT 0 +#elif defined(CONFIG_CPU_SUBTYPE_SH7723) #define DMTE0_IRQ 48 /* DMAC0A*/ #define DMTE4_IRQ 76 /* DMAC0B */ #define DMTE6_IRQ 40 @@ -30,6 +46,27 @@ #define SH_DMAC_BASE0 0xFE008020 #define SH_DMAC_BASE1 0xFDC08020 #define SH_DMARS_BASE 0xFDC09000 +#define CHCR_TS_LOW_MASK 0x00000018 +#define CHCR_TS_LOW_SHIFT 3 +#define CHCR_TS_HIGH_MASK 0 +#define CHCR_TS_HIGH_SHIFT 0 +#elif defined(CONFIG_CPU_SUBTYPE_SH7724) +#define DMTE0_IRQ 48 /* DMAC0A*/ +#define DMTE4_IRQ 76 /* DMAC0B */ +#define DMTE6_IRQ 40 +#define DMTE8_IRQ 42 /* DMAC1A */ +#define DMTE9_IRQ 43 +#define DMTE10_IRQ 72 /* DMAC1B */ +#define DMTE11_IRQ 73 +#define DMAE0_IRQ 78 /* DMA Error IRQ*/ +#define DMAE1_IRQ 74 /* DMA Error IRQ*/ +#define SH_DMAC_BASE0 0xFE008020 +#define SH_DMAC_BASE1 0xFDC08020 +#define SH_DMARS_BASE 0xFDC09000 +#define CHCR_TS_LOW_MASK 0x00000018 +#define CHCR_TS_LOW_SHIFT 3 +#define CHCR_TS_HIGH_MASK 0x00600000 +#define CHCR_TS_HIGH_SHIFT 21 #elif defined(CONFIG_CPU_SUBTYPE_SH7780) #define DMTE0_IRQ 34 #define DMTE4_IRQ 44 @@ -42,6 +79,10 @@ #define 
SH_DMAC_BASE0 0xFC808020 #define SH_DMAC_BASE1 0xFC818020 #define SH_DMARS_BASE 0xFC809000 +#define CHCR_TS_LOW_MASK 0x00000018 +#define CHCR_TS_LOW_SHIFT 3 +#define CHCR_TS_HIGH_MASK 0 +#define CHCR_TS_HIGH_SHIFT 0 #else /* SH7785 */ #define DMTE0_IRQ 33 #define DMTE4_IRQ 37 @@ -55,17 +96,16 @@ #define SH_DMAC_BASE0 0xFC808020 #define SH_DMAC_BASE1 0xFCC08020 #define SH_DMARS_BASE 0xFC809000 +#define CHCR_TS_LOW_MASK 0x00000018 +#define CHCR_TS_LOW_SHIFT 3 +#define CHCR_TS_HIGH_MASK 0 +#define CHCR_TS_HIGH_SHIFT 0 #endif -#define REQ_HE 0x000000C0 -#define REQ_H 0x00000080 -#define REQ_LE 0x00000040 -#define TM_BURST 0x0000020 -#define TS_8 0x00000000 -#define TS_16 0x00000008 -#define TS_32 0x00000010 -#define TS_16BLK 0x00000018 -#define TS_32BLK 0x00100000 +#define REQ_HE 0x000000C0 +#define REQ_H 0x00000080 +#define REQ_LE 0x00000040 +#define TM_BURST 0x00000020 /* * The SuperH DMAC supports a number of transmit sizes, we list them here, @@ -74,22 +114,31 @@ * Defaults to a 64-bit transfer size. */ enum { - XMIT_SZ_8BIT, - XMIT_SZ_16BIT, - XMIT_SZ_32BIT, - XMIT_SZ_128BIT, - XMIT_SZ_256BIT, + XMIT_SZ_8BIT = 0, + XMIT_SZ_16BIT = 1, + XMIT_SZ_32BIT = 2, + XMIT_SZ_64BIT = 7, + XMIT_SZ_128BIT = 3, + XMIT_SZ_256BIT = 4, + XMIT_SZ_128BIT_BLK = 0xb, + XMIT_SZ_256BIT_BLK = 0xc, }; /* * The DMA count is defined as the number of bytes to transfer. */ -static unsigned int ts_shift[] __maybe_unused = { - [XMIT_SZ_8BIT] = 0, - [XMIT_SZ_16BIT] = 1, - [XMIT_SZ_32BIT] = 2, - [XMIT_SZ_128BIT] = 4, - [XMIT_SZ_256BIT] = 5, -}; +#define TS_SHIFT { \ + [XMIT_SZ_8BIT] = 0, \ + [XMIT_SZ_16BIT] = 1, \ + [XMIT_SZ_32BIT] = 2, \ + [XMIT_SZ_64BIT] = 3, \ + [XMIT_SZ_128BIT] = 4, \ + [XMIT_SZ_256BIT] = 5, \ + [XMIT_SZ_128BIT_BLK] = 4, \ + [XMIT_SZ_256BIT_BLK] = 5, \ +} + +#define TS_INDEX2VAL(i) ((((i) & 3) << CHCR_TS_LOW_SHIFT) | \ + ((((i) >> 2) & 3) << CHCR_TS_HIGH_SHIFT)) #endif /* __ASM_SH_CPU_SH4_DMA_SH7780_H */ diff --git a/arch/sh/include/cpu-sh4/cpu/dma.h b/arch/sh/include/cpu-sh4/cpu/dma.h index bcb30246e85..114a369705b 100644 --- a/arch/sh/include/cpu-sh4/cpu/dma.h +++ b/arch/sh/include/cpu-sh4/cpu/dma.h @@ -6,8 +6,6 @@ #ifdef CONFIG_CPU_SH4A #define DMAOR_INIT (DMAOR_DME) -#define CHCR_TS_MASK 0x18 -#define CHCR_TS_SHIFT 3 #include #else /* CONFIG_CPU_SH4A */ @@ -29,8 +27,10 @@ #define TS_32 0x00000030 #define TS_64 0x00000000 -#define CHCR_TS_MASK 0x70 -#define CHCR_TS_SHIFT 4 +#define CHCR_TS_LOW_MASK 0x70 +#define CHCR_TS_LOW_SHIFT 4 +#define CHCR_TS_HIGH_MASK 0 +#define CHCR_TS_HIGH_SHIFT 0 #define DMAOR_COD 0x00000008 @@ -41,23 +41,26 @@ * Defaults to a 64-bit transfer size. */ enum { - XMIT_SZ_64BIT, - XMIT_SZ_8BIT, - XMIT_SZ_16BIT, - XMIT_SZ_32BIT, - XMIT_SZ_256BIT, + XMIT_SZ_8BIT = 1, + XMIT_SZ_16BIT = 2, + XMIT_SZ_32BIT = 3, + XMIT_SZ_64BIT = 0, + XMIT_SZ_256BIT = 4, }; /* * The DMA count is defined as the number of bytes to transfer. 
*/ -static unsigned int ts_shift[] __maybe_unused = { - [XMIT_SZ_64BIT] = 3, - [XMIT_SZ_8BIT] = 0, - [XMIT_SZ_16BIT] = 1, - [XMIT_SZ_32BIT] = 2, - [XMIT_SZ_256BIT] = 5, -}; +#define TS_SHIFT { \ + [XMIT_SZ_8BIT] = 0, \ + [XMIT_SZ_16BIT] = 1, \ + [XMIT_SZ_32BIT] = 2, \ + [XMIT_SZ_64BIT] = 3, \ + [XMIT_SZ_256BIT] = 5, \ +} + +#define TS_INDEX2VAL(i) (((i) & 7) << CHCR_TS_LOW_SHIFT) + #endif #endif /* __ASM_CPU_SH4_DMA_H */ diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index 427c3effc43..3e1037c5ebd 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c @@ -105,10 +105,14 @@ static bool dmae_is_busy(struct sh_dmae_chan *sh_chan) return false; /* waiting */ } +static unsigned int ts_shift[] = TS_SHIFT; static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan) { u32 chcr = sh_dmae_readl(sh_chan, CHCR); - return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT]; + int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) | + ((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT); + + return ts_shift[cnt]; } static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) -- cgit v1.2.3-70-g09d2 From cfefe99795251d76d92e8457f4152f532a961ec5 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Wed, 3 Feb 2010 14:46:41 +0000 Subject: sh: implement DMA_SLAVE capability in SH dmaengine driver Tested to work with a SIU ASoC driver on sh7722 (migor). Signed-off-by: Guennadi Liakhovetski Acked-by: Dan Williams Signed-off-by: Paul Mundt --- arch/sh/include/asm/dma-sh.h | 41 ++++++- arch/sh/include/cpu-sh4/cpu/dma-sh4a.h | 15 +-- drivers/dma/shdma.c | 190 ++++++++++++++++++++++++--------- drivers/dma/shdma.h | 7 +- 4 files changed, 192 insertions(+), 61 deletions(-) (limited to 'drivers/dma') diff --git a/arch/sh/include/asm/dma-sh.h b/arch/sh/include/asm/dma-sh.h index 01d2fc72551..c8d8ce78f34 100644 --- a/arch/sh/include/asm/dma-sh.h +++ b/arch/sh/include/asm/dma-sh.h @@ -64,8 +64,10 @@ static int dmte_irq_map[] __maybe_unused = { #define ACK_L 0x00010000 #define DM_INC 0x00004000 #define DM_DEC 0x00008000 +#define DM_FIX 0x0000c000 #define SM_INC 0x00001000 #define SM_DEC 0x00002000 +#define SM_FIX 0x00003000 #define RS_IN 0x00000200 #define RS_OUT 0x00000300 #define TS_BLK 0x00000040 @@ -123,10 +125,47 @@ static u32 dma_base_addr[] __maybe_unused = { */ #define SHDMA_MIX_IRQ (1 << 1) #define SHDMA_DMAOR1 (1 << 2) -#define SHDMA_DMAE1 (1 << 3) +#define SHDMA_DMAE1 (1 << 3) + +enum sh_dmae_slave_chan_id { + SHDMA_SLAVE_SCIF0_TX, + SHDMA_SLAVE_SCIF0_RX, + SHDMA_SLAVE_SCIF1_TX, + SHDMA_SLAVE_SCIF1_RX, + SHDMA_SLAVE_SCIF2_TX, + SHDMA_SLAVE_SCIF2_RX, + SHDMA_SLAVE_SCIF3_TX, + SHDMA_SLAVE_SCIF3_RX, + SHDMA_SLAVE_SCIF4_TX, + SHDMA_SLAVE_SCIF4_RX, + SHDMA_SLAVE_SCIF5_TX, + SHDMA_SLAVE_SCIF5_RX, + SHDMA_SLAVE_SIUA_TX, + SHDMA_SLAVE_SIUA_RX, + SHDMA_SLAVE_SIUB_TX, + SHDMA_SLAVE_SIUB_RX, + SHDMA_SLAVE_NUMBER, /* Must stay last */ +}; + +struct sh_dmae_slave_config { + enum sh_dmae_slave_chan_id slave_id; + dma_addr_t addr; + u32 chcr; + char mid_rid; +}; struct sh_dmae_pdata { unsigned int mode; + struct sh_dmae_slave_config *config; + int config_num; +}; + +struct device; + +struct sh_dmae_slave { + enum sh_dmae_slave_chan_id slave_id; /* Set by the platform */ + struct device *dma_dev; /* Set by the platform */ + struct sh_dmae_slave_config *config; /* Set by the driver */ }; #endif /* __DMA_SH_H */ diff --git a/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h b/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h index cc1cf3e8f16..e734ea47d8a 100644 --- 
a/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h +++ b/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h @@ -7,7 +7,7 @@ #define DMTE4_IRQ 76 #define DMAE0_IRQ 78 /* DMA Error IRQ*/ #define SH_DMAC_BASE0 0xFE008020 -#define SH_DMARS_BASE 0xFE009000 +#define SH_DMARS_BASE0 0xFE009000 #define CHCR_TS_LOW_MASK 0x00000018 #define CHCR_TS_LOW_SHIFT 3 #define CHCR_TS_HIGH_MASK 0 @@ -17,7 +17,7 @@ #define DMTE4_IRQ 76 #define DMAE0_IRQ 78 /* DMA Error IRQ*/ #define SH_DMAC_BASE0 0xFE008020 -#define SH_DMARS_BASE 0xFE009000 +#define SH_DMARS_BASE0 0xFE009000 #define CHCR_TS_LOW_MASK 0x00000018 #define CHCR_TS_LOW_SHIFT 3 #define CHCR_TS_HIGH_MASK 0x00300000 @@ -28,7 +28,7 @@ #define DMTE4_IRQ 44 #define DMAE0_IRQ 38 #define SH_DMAC_BASE0 0xFF608020 -#define SH_DMARS_BASE 0xFF609000 +#define SH_DMARS_BASE0 0xFF609000 #define CHCR_TS_LOW_MASK 0x00000018 #define CHCR_TS_LOW_SHIFT 3 #define CHCR_TS_HIGH_MASK 0 @@ -45,7 +45,7 @@ #define DMAE1_IRQ 74 /* DMA Error IRQ*/ #define SH_DMAC_BASE0 0xFE008020 #define SH_DMAC_BASE1 0xFDC08020 -#define SH_DMARS_BASE 0xFDC09000 +#define SH_DMARS_BASE0 0xFDC09000 #define CHCR_TS_LOW_MASK 0x00000018 #define CHCR_TS_LOW_SHIFT 3 #define CHCR_TS_HIGH_MASK 0 @@ -62,7 +62,8 @@ #define DMAE1_IRQ 74 /* DMA Error IRQ*/ #define SH_DMAC_BASE0 0xFE008020 #define SH_DMAC_BASE1 0xFDC08020 -#define SH_DMARS_BASE 0xFDC09000 +#define SH_DMARS_BASE0 0xFE009000 +#define SH_DMARS_BASE1 0xFDC09000 #define CHCR_TS_LOW_MASK 0x00000018 #define CHCR_TS_LOW_SHIFT 3 #define CHCR_TS_HIGH_MASK 0x00600000 @@ -78,7 +79,7 @@ #define DMAE0_IRQ 38 /* DMA Error IRQ */ #define SH_DMAC_BASE0 0xFC808020 #define SH_DMAC_BASE1 0xFC818020 -#define SH_DMARS_BASE 0xFC809000 +#define SH_DMARS_BASE0 0xFC809000 #define CHCR_TS_LOW_MASK 0x00000018 #define CHCR_TS_LOW_SHIFT 3 #define CHCR_TS_HIGH_MASK 0 @@ -95,7 +96,7 @@ #define DMAE1_IRQ 58 /* DMA Error IRQ1 */ #define SH_DMAC_BASE0 0xFC808020 #define SH_DMAC_BASE1 0xFCC08020 -#define SH_DMARS_BASE 0xFC809000 +#define SH_DMARS_BASE0 0xFC809000 #define CHCR_TS_LOW_MASK 0x00000018 #define CHCR_TS_LOW_SHIFT 3 #define CHCR_TS_HIGH_MASK 0 diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index 3e1037c5ebd..b75ce8b84c4 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c @@ -48,6 +48,9 @@ enum sh_dmae_desc_status { */ #define RS_DEFAULT (RS_DUAL) +/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */ +static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)]; + static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); #define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id]) @@ -61,12 +64,6 @@ static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) return ctrl_inl(SH_DMAC_CHAN_BASE(sh_dc->id) + reg); } -static void dmae_init(struct sh_dmae_chan *sh_chan) -{ - u32 chcr = RS_DEFAULT; /* default is DUAL mode */ - sh_dmae_writel(sh_chan, chcr, CHCR); -} - /* * Reset DMA controller * @@ -106,9 +103,8 @@ static bool dmae_is_busy(struct sh_dmae_chan *sh_chan) } static unsigned int ts_shift[] = TS_SHIFT; -static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan) +static inline unsigned int calc_xmit_shift(u32 chcr) { - u32 chcr = sh_dmae_readl(sh_chan, CHCR); int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) | ((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT); @@ -119,7 +115,7 @@ static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) { sh_dmae_writel(sh_chan, hw->sar, SAR); sh_dmae_writel(sh_chan, hw->dar, DAR); - sh_dmae_writel(sh_chan, hw->tcr >> calc_xmit_shift(sh_chan), TCR); + 
sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR); } static void dmae_start(struct sh_dmae_chan *sh_chan) @@ -127,7 +123,7 @@ static void dmae_start(struct sh_dmae_chan *sh_chan) u32 chcr = sh_dmae_readl(sh_chan, CHCR); chcr |= CHCR_DE | CHCR_IE; - sh_dmae_writel(sh_chan, chcr, CHCR); + sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR); } static void dmae_halt(struct sh_dmae_chan *sh_chan) @@ -138,20 +134,27 @@ static void dmae_halt(struct sh_dmae_chan *sh_chan) sh_dmae_writel(sh_chan, chcr, CHCR); } +static void dmae_init(struct sh_dmae_chan *sh_chan) +{ + u32 chcr = RS_DEFAULT; /* default is DUAL mode */ + sh_chan->xmit_shift = calc_xmit_shift(chcr); + sh_dmae_writel(sh_chan, chcr, CHCR); +} + static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) { /* When DMA was working, can not set data to CHCR */ if (dmae_is_busy(sh_chan)) return -EBUSY; + sh_chan->xmit_shift = calc_xmit_shift(val); sh_dmae_writel(sh_chan, val, CHCR); + return 0; } -#define DMARS1_ADDR 0x04 -#define DMARS2_ADDR 0x08 -#define DMARS_SHIFT 8 -#define DMARS_CHAN_MSK 0x01 +#define DMARS_SHIFT 8 +#define DMARS_CHAN_MSK 0x01 static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) { u32 addr; @@ -163,29 +166,18 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) if (sh_chan->id & DMARS_CHAN_MSK) shift = DMARS_SHIFT; - switch (sh_chan->id) { - /* DMARS0 */ - case 0: - case 1: - addr = SH_DMARS_BASE; - break; - /* DMARS1 */ - case 2: - case 3: - addr = (SH_DMARS_BASE + DMARS1_ADDR); - break; - /* DMARS2 */ - case 4: - case 5: - addr = (SH_DMARS_BASE + DMARS2_ADDR); - break; - default: + if (sh_chan->id < 6) + /* DMA0RS0 - DMA0RS2 */ + addr = SH_DMARS_BASE0 + (sh_chan->id / 2) * 4; +#ifdef SH_DMARS_BASE1 + else if (sh_chan->id < 12) + /* DMA1RS0 - DMA1RS2 */ + addr = SH_DMARS_BASE1 + ((sh_chan->id - 6) / 2) * 4; +#endif + else return -EINVAL; - } - ctrl_outw((val << shift) | - (ctrl_inw(addr) & (shift ? 0xFF00 : 0x00FF)), - addr); + ctrl_outw((val << shift) | (ctrl_inw(addr) & (0xFF00 >> shift)), addr); return 0; } @@ -253,10 +245,53 @@ static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan) return NULL; } +static struct sh_dmae_slave_config *sh_dmae_find_slave( + struct sh_dmae_chan *sh_chan, enum sh_dmae_slave_chan_id slave_id) +{ + struct dma_device *dma_dev = sh_chan->common.device; + struct sh_dmae_device *shdev = container_of(dma_dev, + struct sh_dmae_device, common); + struct sh_dmae_pdata *pdata = &shdev->pdata; + int i; + + if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER) + return NULL; + + for (i = 0; i < pdata->config_num; i++) + if (pdata->config[i].slave_id == slave_id) + return pdata->config + i; + + return NULL; +} + static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) { struct sh_dmae_chan *sh_chan = to_sh_chan(chan); struct sh_desc *desc; + struct sh_dmae_slave *param = chan->private; + + /* + * This relies on the guarantee from dmaengine that alloc_chan_resources + * never runs concurrently with itself or free_chan_resources. 
+ */ + if (param) { + struct sh_dmae_slave_config *cfg; + + cfg = sh_dmae_find_slave(sh_chan, param->slave_id); + if (!cfg) + return -EINVAL; + + if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) + return -EBUSY; + + param->config = cfg; + + dmae_set_dmars(sh_chan, cfg->mid_rid); + dmae_set_chcr(sh_chan, cfg->chcr); + } else { + if ((sh_dmae_readl(sh_chan, CHCR) & 0x700) != 0x400) + dmae_set_chcr(sh_chan, RS_DEFAULT); + } spin_lock_bh(&sh_chan->desc_lock); while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) { @@ -289,10 +324,18 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan) struct sh_desc *desc, *_desc; LIST_HEAD(list); + dmae_halt(sh_chan); + /* Prepared and not submitted descriptors can still be on the queue */ if (!list_empty(&sh_chan->ld_queue)) sh_dmae_chan_ld_cleanup(sh_chan, true); + if (chan->private) { + /* The caller is holding dma_list_mutex */ + struct sh_dmae_slave *param = chan->private; + clear_bit(param->slave_id, sh_dmae_slave_used); + } + spin_lock_bh(&sh_chan->desc_lock); list_splice_init(&sh_chan->ld_free, &list); @@ -304,7 +347,7 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan) kfree(desc); } -/* +/** * sh_dmae_add_desc - get, set up and return one transfer descriptor * @sh_chan: DMA channel * @flags: DMA transfer flags @@ -351,12 +394,14 @@ static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan, new->async_tx.cookie = -EINVAL; } - dev_dbg(sh_chan->dev, "chaining (%u/%u)@%x -> %x with %p, cookie %d\n", + dev_dbg(sh_chan->dev, + "chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n", copy_size, *len, *src, *dest, &new->async_tx, - new->async_tx.cookie); + new->async_tx.cookie, sh_chan->xmit_shift); new->mark = DESC_PREPARED; new->async_tx.flags = flags; + new->direction = direction; *len -= copy_size; if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE) @@ -465,6 +510,8 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy( if (!chan || !len) return NULL; + chan->private = NULL; + sh_chan = to_sh_chan(chan); sg_init_table(&sg, 1); @@ -477,6 +524,44 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy( flags); } +static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg( + struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, + enum dma_data_direction direction, unsigned long flags) +{ + struct sh_dmae_slave *param; + struct sh_dmae_chan *sh_chan; + + if (!chan) + return NULL; + + sh_chan = to_sh_chan(chan); + param = chan->private; + + /* Someone calling slave DMA on a public channel? */ + if (!param || !sg_len) { + dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n", + __func__, param, sg_len, param ? param->slave_id : -1); + return NULL; + } + + /* + * if (param != NULL), this is a successfully requested slave channel, + * therefore param->config != NULL too. 
+ */ + return sh_dmae_prep_sg(sh_chan, sgl, sg_len, ¶m->config->addr, + direction, flags); +} + +static void sh_dmae_terminate_all(struct dma_chan *chan) +{ + struct sh_dmae_chan *sh_chan = to_sh_chan(chan); + + if (!chan) + return; + + sh_dmae_chan_ld_cleanup(sh_chan, true); +} + static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all) { struct sh_desc *desc, *_desc; @@ -508,7 +593,11 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all cookie = tx->cookie; if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { - BUG_ON(sh_chan->completed_cookie != desc->cookie - 1); + if (sh_chan->completed_cookie != desc->cookie - 1) + dev_dbg(sh_chan->dev, + "Completing cookie %d, expected %d\n", + desc->cookie, + sh_chan->completed_cookie + 1); sh_chan->completed_cookie = desc->cookie; } @@ -581,7 +670,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) return; } - /* Find the first un-transfer desciptor */ + /* Find the first not transferred desciptor */ list_for_each_entry(sd, &sh_chan->ld_queue, node) if (sd->mark == DESC_SUBMITTED) { /* Get the ld start address from ld_queue */ @@ -685,11 +774,14 @@ static void dmae_do_tasklet(unsigned long data) struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data; struct sh_desc *desc; u32 sar_buf = sh_dmae_readl(sh_chan, SAR); + u32 dar_buf = sh_dmae_readl(sh_chan, DAR); spin_lock(&sh_chan->desc_lock); list_for_each_entry(desc, &sh_chan->ld_queue, node) { - if ((desc->hw.sar + desc->hw.tcr) == sar_buf && - desc->mark == DESC_SUBMITTED) { + if (desc->mark == DESC_SUBMITTED && + ((desc->direction == DMA_FROM_DEVICE && + (desc->hw.dar + desc->hw.tcr) == dar_buf) || + (desc->hw.sar + desc->hw.tcr) == sar_buf)) { dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n", desc->async_tx.cookie, &desc->async_tx, desc->hw.dar); @@ -762,7 +854,7 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id) } snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), - "sh-dmae%d", new_sh_chan->id); + "sh-dmae%d", new_sh_chan->id); /* set up channel irq */ err = request_irq(irq, &sh_dmae_interrupt, irqflags, @@ -773,11 +865,6 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id) goto err_no_irq; } - /* CHCR register control function */ - new_sh_chan->set_chcr = dmae_set_chcr; - /* DMARS register control function */ - new_sh_chan->set_dmars = dmae_set_dmars; - shdev->chan[id] = new_sh_chan; return 0; @@ -848,12 +935,19 @@ static int __init sh_dmae_probe(struct platform_device *pdev) INIT_LIST_HEAD(&shdev->common.channels); dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); + dma_cap_set(DMA_SLAVE, shdev->common.cap_mask); + shdev->common.device_alloc_chan_resources = sh_dmae_alloc_chan_resources; shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources; shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy; shdev->common.device_is_tx_complete = sh_dmae_is_complete; shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending; + + /* Compulsory for DMA_SLAVE fields */ + shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg; + shdev->common.device_terminate_all = sh_dmae_terminate_all; + shdev->common.dev = &pdev->dev; /* Default transfer size of 32 bytes requires 32-byte alignment */ shdev->common.copy_align = 5; diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h index 108f1cffb6f..7e227f3c87c 100644 --- a/drivers/dma/shdma.h +++ b/drivers/dma/shdma.h @@ -29,6 +29,7 @@ struct sh_desc { struct sh_dmae_regs hw; struct list_head 
node;
 	struct dma_async_tx_descriptor async_tx;
+	enum dma_data_direction direction;
 	dma_cookie_t cookie;
 	int chunks;
 	int mark;
@@ -45,13 +46,9 @@ struct sh_dmae_chan {
 	struct device *dev;		/* Channel device */
 	struct tasklet_struct tasklet;	/* Tasklet */
 	int descs_allocated;		/* desc count */
+	int xmit_shift;			/* log_2(bytes_per_xfer) */
 	int id;				/* Raw id of this channel */
 	char dev_id[16];		/* unique name per DMAC of channel */
-
-	/* Set chcr */
-	int (*set_chcr)(struct sh_dmae_chan *sh_chan, u32 regs);
-	/* Set DMA resource */
-	int (*set_dmars)(struct sh_dmae_chan *sh_chan, u16 res);
 };

 struct sh_dmae_device {
-- 
cgit v1.2.3-70-g09d2
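For context, here is one plausible way a client would request one of the
slave channels defined above, using the standard dmaengine filter idiom.
The exact wiring (setting chan->private from the filter callback) is a
sketch, not taken from this patch series:

	static bool sh_dmae_filter(struct dma_chan *chan, void *arg)
	{
		chan->private = arg;	/* hand the slave config to the driver */
		return true;
	}

	struct sh_dmae_slave param = { .slave_id = SHDMA_SLAVE_SCIF0_TX };
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, sh_dmae_filter, &param);

alloc_chan_resources then looks the slave_id up in the platform data and
programs DMARS/CHCR accordingly, as the diff above shows.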
From a29d8b8e2d811a24bbe49215a0f0c536b72ebc18 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Tue, 2 Feb 2010 14:39:15 +0900
Subject: percpu: add __percpu sparse annotations to what's left

Add __percpu sparse annotations to places which didn't make it in one of
the previous patches. All conversions are trivial.

These annotations are to make sparse consider percpu variables to be in a
different address space and warn if accessed without going through percpu
accessors. This patch doesn't affect normal builds.

Signed-off-by: Tejun Heo
Acked-by: Borislav Petkov
Cc: Dan Williams
Cc: Huang Ying
Cc: Len Brown
Cc: Neil Brown
---
 crypto/cryptd.c                  | 2 +-
 drivers/acpi/processor_perflib.c | 2 +-
 drivers/dma/dmaengine.c          | 2 +-
 drivers/edac/amd64_edac.c        | 2 +-
 drivers/md/raid5.c               | 2 +-
 drivers/md/raid5.h               | 2 +-
 include/acpi/processor.h         | 2 +-
 include/linux/dmaengine.h        | 2 +-
 8 files changed, 8 insertions(+), 8 deletions(-)
(limited to 'drivers/dma')

diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 704c1411532..ef71318976c 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -31,7 +31,7 @@ struct cryptd_cpu_queue {
 };

 struct cryptd_queue {
-	struct cryptd_cpu_queue *cpu_queue;
+	struct cryptd_cpu_queue __percpu *cpu_queue;
 };

 struct cryptd_instance_ctx {
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 2cabadcc4d8..8c6a6497d7f 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -557,7 +557,7 @@ end:
 }

 int acpi_processor_preregister_performance(
-		struct acpi_processor_performance *performance)
+		struct acpi_processor_performance __percpu *performance)
 {
 	int count, count_target;
 	int retval = 0;
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 6f51a0a7a8b..4eadd98cea5 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -284,7 +284,7 @@ struct dma_chan_tbl_ent {
 /**
  * channel_table - percpu lookup table for memory-to-memory offload providers
  */
-static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END];
+static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

 static int __init dma_channel_table_init(void)
 {
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 000dc67b85b..7b36c8838b2 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -13,7 +13,7 @@ module_param(report_gart_errors, int, 0644);
 static int ecc_enable_override;
 module_param(ecc_enable_override, int, 0644);

-static struct msr *msrs;
+static struct msr __percpu *msrs;

 /* Lookup table for all possible MC control instances */
 struct amd64_pvt;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e84204eb12d..77cb3ab4bf4 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4680,7 +4680,7 @@ static int raid5_alloc_percpu(raid5_conf_t *conf)
 {
 	unsigned long cpu;
 	struct page *spare_page;
-	struct raid5_percpu *allcpus;
+	struct raid5_percpu __percpu *allcpus;
 	void *scribble;
 	int err;

diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index dd708359b45..0f86f5e3672 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -405,7 +405,7 @@ struct raid5_private_data {
 					      * lists and performing address
 					      * conversions
 					      */
-	} *percpu;
+	} __percpu *percpu;
 	size_t scribble_len; /* size of scribble region must be
 			      * associated with conf to handle
 			      * cpu hotplug while reshaping
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 0ea5ef4eb6a..477544fd8e9 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -238,7 +238,7 @@ struct acpi_processor_errata {

 extern int acpi_processor_preregister_performance(struct
 						  acpi_processor_performance
-						  *performance);
+						  __percpu *performance);

 extern int acpi_processor_register_performance(struct acpi_processor_performance
 					       *performance, unsigned int cpu);
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 78784982b33..21fd9b7c6a4 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -162,7 +162,7 @@ struct dma_chan {
 	struct dma_chan_dev *dev;

 	struct list_head device_node;
-	struct dma_chan_percpu *local;
+	struct dma_chan_percpu __percpu *local;
 	int client_count;
 	int table_count;
 	void *private;
-- 
cgit v1.2.3-70-g09d2

From 6ca3a7a96e91b1aa8c704153c992b191d35b5747 Mon Sep 17 00:00:00 2001
From: "Steven J. Magnani"
Date: Thu, 25 Feb 2010 13:39:30 -0600
Subject: fsldma: Fix cookie issues

fsl_dma_tx_submit() only sets the cookie on the first descriptor of a
transaction. It should set the cookie on all.

Signed-off-by: Steven J. Magnani
Acked-by: Ira W. Snyder
Signed-off-by: Dan Williams
---
 drivers/dma/fsldma.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'drivers/dma')

diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 92efa87258b..6541ebf8bf6 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -362,7 +362,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 		if (cookie < 0)
 			cookie = 1;

-		desc->async_tx.cookie = cookie;
+		child->async_tx.cookie = cookie;
 	}

 	chan->common.cookie = cookie;
-- 
cgit v1.2.3-70-g09d2

From 76bd061f5c7b7550cdaed68ad6219ea7cee288fc Mon Sep 17 00:00:00 2001
From: "Steven J. Magnani"
Date: Sun, 28 Feb 2010 22:18:16 -0700
Subject: fsldma: Fix cookie issues

fsl_dma_update_completed_cookie() appears to calculate the last completed
cookie incorrectly in the corner case where DMA on cookie 1 is in progress
just following a cookie wrap.
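Concretely, the corner case looks like this (illustrative sketch;
DMA_MIN_COOKIE and DMA_MAX_COOKIE are the bounds this patch introduces):

	/* cookies live in [DMA_MIN_COOKIE, DMA_MAX_COOKIE] = [1, INT_MAX] */
	cookie = desc->async_tx.cookie - 1;
	if (unlikely(cookie < DMA_MIN_COOKIE))	/* in-flight cookie was 1 */
		cookie = DMA_MAX_COOKIE;	/* wrap back, don't report 0 */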
Snyder [dan.j.williams@intel.com: fix an integer overflow warning with INT_MAX] Signed-off-by: Dan Williams --- drivers/dma/fsldma.c | 5 ++++- include/linux/dmaengine.h | 2 ++ 2 files changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 6541ebf8bf6..bbb4be5a3ff 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -819,8 +819,11 @@ static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan) desc = to_fsl_desc(chan->ld_running.prev); if (dma_is_idle(chan)) cookie = desc->async_tx.cookie; - else + else { cookie = desc->async_tx.cookie - 1; + if (unlikely(cookie < DMA_MIN_COOKIE)) + cookie = DMA_MAX_COOKIE; + } chan->completed_cookie = cookie; diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 78784982b33..4d8d619f28b 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -31,6 +31,8 @@ * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code */ typedef s32 dma_cookie_t; +#define DMA_MIN_COOKIE 1 +#define DMA_MAX_COOKIE INT_MAX #define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0) -- cgit v1.2.3-70-g09d2 From 94de648d72c8bc833590523f22386d4babbea988 Mon Sep 17 00:00:00 2001 From: Anatolij Gustschin Date: Mon, 15 Feb 2010 22:35:23 +0100 Subject: dmatest: correct raid6 PQ test The number of PQ sources specified by module parameter "pq_sources" is always forced odd to fit into dmatest's destination verificaton scheme. But number of PQ sources and coefficients as passed to the driver's prep_dma_pq() is not adjusted accordingly. Fix it now to get correct PQ testing results in the case passed "pq_sources" parameter is even. Signed-off-by: Anatolij Gustschin Signed-off-by: Andrew Morton Cc: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/dmatest.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 8b905161fbf..cc0f3294a76 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -237,7 +237,7 @@ static int dmatest_func(void *data) dma_cookie_t cookie; enum dma_status status; enum dma_ctrl_flags flags; - u8 pq_coefs[pq_sources]; + u8 pq_coefs[pq_sources + 1]; int ret; int src_cnt; int dst_cnt; @@ -257,7 +257,7 @@ static int dmatest_func(void *data) } else if (thread->type == DMA_PQ) { src_cnt = pq_sources | 1; /* force odd to ensure dst = src */ dst_cnt = 2; - for (i = 0; i < pq_sources; i++) + for (i = 0; i < src_cnt; i++) pq_coefs[i] = 1; } else goto err_srcs; @@ -355,7 +355,7 @@ static int dmatest_func(void *data) for (i = 0; i < dst_cnt; i++) dma_pq[i] = dma_dsts[i] + dst_off; tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs, - pq_sources, pq_coefs, + src_cnt, pq_coefs, len, flags); } -- cgit v1.2.3-70-g09d2 From 67b9124f734b22b30d9adf18c39fe795e2901070 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Sun, 28 Feb 2010 22:20:18 -0700 Subject: dmatest: fix handling of an even number of xor_sources Just like commit ac5d73fc, we need to be careful to use 'src_cnt' as it contains the fixed up number of xor sources (forced odd) to meet dmatest's data verification scheme. 
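The fixed-up value in question is computed once per test thread (sketch,
mirroring the PQ case shown above):

	src_cnt = xor_sources | 1;	/* force odd to ensure dst = src */
	/* ... */
	tx = dev->device_prep_dma_xor(chan, dma_dsts[0] + dst_off,
				      dma_srcs, src_cnt, len, flags);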
Signed-off-by: Dan Williams
---
 drivers/dma/dmatest.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'drivers/dma')

diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index cc0f3294a76..8e409fb50fc 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -347,7 +347,7 @@ static int dmatest_func(void *data)
 		else if (thread->type == DMA_XOR)
 			tx = dev->device_prep_dma_xor(chan,
 						      dma_dsts[0] + dst_off,
-						      dma_srcs, xor_sources,
+						      dma_srcs, src_cnt,
 						      len, flags);
 		else if (thread->type == DMA_PQ) {
 			dma_addr_t dma_pq[dst_cnt];
-- 
cgit v1.2.3-70-g09d2

From 47a4dc26eeb89a3746f9b1e2092602b40469640a Mon Sep 17 00:00:00 2001
From: Guennadi Liakhovetski
Date: Thu, 11 Feb 2010 16:50:05 +0000
Subject: dmaengine: shdma: fix DMA error handling.

The present DMA error ISR in shdma.c is bogus: it locks the system hard
in multiple ways. Fix it to abort all queued transactions on all channels
on the affected controller, giving submitters a chance to get a DMA_ERROR
status for the aborted transactions. Afterwards, further functionality is
again possible without the need to re-load the driver.
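From a submitter's point of view, the observable change is that polling a
cookie belonging to an aborted transaction now yields DMA_ERROR instead of
wedging. An illustrative check using the generic dmaengine API of this era
(sketch only):

	enum dma_status status;

	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
	if (status == DMA_ERROR) {
		/* aborted by the error ISR: clean up, resubmit if desired */
	}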
Signed-off-by: Guennadi Liakhovetski
Signed-off-by: Paul Mundt
---
 drivers/dma/shdma.c | 89 +++++++++++++++++++++++++++++++----------------------
 1 file changed, 53 insertions(+), 36 deletions(-)
(limited to 'drivers/dma')

diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index b75ce8b84c4..77311698c04 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -73,7 +73,7 @@ static void sh_dmae_ctl_stop(int id)
 {
 	unsigned short dmaor = dmaor_read_reg(id);

-	dmaor &= ~(DMAOR_NMIF | DMAOR_AE);
+	dmaor &= ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
 	dmaor_write_reg(id, dmaor);
 }

@@ -86,7 +86,7 @@ static int sh_dmae_rst(int id)
 	dmaor_write_reg(id, dmaor);
 	if (dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF)) {
-		pr_warning(KERN_ERR "dma-sh: Can't initialize DMAOR.\n");
+		pr_warning("dma-sh: Can't initialize DMAOR.\n");
 		return -EINVAL;
 	}
 	return 0;
@@ -661,7 +661,7 @@ static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)

 static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
 {
-	struct sh_desc *sd;
+	struct sh_desc *desc;

 	spin_lock_bh(&sh_chan->desc_lock);
 	/* DMA work check */
@@ -671,10 +671,10 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
 	}

 	/* Find the first not transferred desciptor */
-	list_for_each_entry(sd, &sh_chan->ld_queue, node)
-		if (sd->mark == DESC_SUBMITTED) {
+	list_for_each_entry(desc, &sh_chan->ld_queue, node)
+		if (desc->mark == DESC_SUBMITTED) {
 			/* Get the ld start address from ld_queue */
-			dmae_set_reg(sh_chan, &sd->hw);
+			dmae_set_reg(sh_chan, &desc->hw);
 			dmae_start(sh_chan);
 			break;
 		}
@@ -696,6 +696,7 @@ static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
 	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
 	dma_cookie_t last_used;
 	dma_cookie_t last_complete;
+	enum dma_status status;

 	sh_dmae_chan_ld_cleanup(sh_chan, false);

@@ -709,7 +710,27 @@ static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
 	if (used)
 		*used = last_used;

-	return dma_async_is_complete(cookie, last_complete, last_used);
+	spin_lock_bh(&sh_chan->desc_lock);
+
+	status = dma_async_is_complete(cookie, last_complete, last_used);
+
+	/*
+	 * If we don't find cookie on the queue, it has been aborted and we have
+	 * to report error
+	 */
+	if (status != DMA_SUCCESS) {
+		struct sh_desc *desc;
+		status = DMA_ERROR;
+		list_for_each_entry(desc, &sh_chan->ld_queue, node)
+			if (desc->cookie == cookie) {
+				status = DMA_IN_PROGRESS;
+				break;
+			}
+	}
+
+	spin_unlock_bh(&sh_chan->desc_lock);
+
+	return status;
 }

 static irqreturn_t sh_dmae_interrupt(int irq, void *data)
@@ -732,40 +753,36 @@ static irqreturn_t sh_dmae_interrupt(int irq, void *data)
 #if defined(CONFIG_CPU_SH4)
 static irqreturn_t sh_dmae_err(int irq, void *data)
 {
-	int err = 0;
 	struct sh_dmae_device *shdev = (struct sh_dmae_device *)data;
+	int i;

-	/* IRQ Multi */
-	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
-		int __maybe_unused cnt = 0;
-		switch (irq) {
-#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
-		case DMTE6_IRQ:
-			cnt++;
-#endif
-		case DMTE0_IRQ:
-			if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) {
-				disable_irq(irq);
-				return IRQ_HANDLED;
+	/* halt the dma controller */
+	sh_dmae_ctl_stop(0);
+	if (shdev->pdata.mode & SHDMA_DMAOR1)
+		sh_dmae_ctl_stop(1);
+
+	/* We cannot detect, which channel caused the error, have to reset all */
+	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
+		struct sh_dmae_chan *sh_chan = shdev->chan[i];
+		if (sh_chan) {
+			struct sh_desc *desc;
+			/* Stop the channel */
+			dmae_halt(sh_chan);
+			/* Complete all */
+			list_for_each_entry(desc, &sh_chan->ld_queue, node) {
+				struct dma_async_tx_descriptor *tx = &desc->async_tx;
+				desc->mark = DESC_IDLE;
+				if (tx->callback)
+					tx->callback(tx->callback_param);
 			}
-		default:
-			return IRQ_NONE;
+			list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free);
 		}
-	} else {
-		/* reset dma controller */
-		err = sh_dmae_rst(0);
-		if (err)
-			return err;
-#ifdef SH_DMAC_BASE1
-		if (shdev->pdata.mode & SHDMA_DMAOR1) {
-			err = sh_dmae_rst(1);
-			if (err)
-				return err;
-		}
-#endif
-		disable_irq(irq);
-		return IRQ_HANDLED;
 	}
+
+	sh_dmae_rst(0);
+	if (shdev->pdata.mode & SHDMA_DMAOR1)
+		sh_dmae_rst(1);
+
+	return IRQ_HANDLED;
 }
 #endif
-- 
cgit v1.2.3-70-g09d2

From 027811b9b81a6b3ae5aa20c3302897bee9dcf09e Mon Sep 17 00:00:00 2001
From: Guennadi Liakhovetski
Date: Thu, 11 Feb 2010 16:50:10 +0000
Subject: dmaengine: shdma: convert to platform device resources

The shdma dmaengine driver currently uses numerous macros to support
various platforms, selected by ifdef's. Convert it to use platform device
resources and lists of channel descriptors to specify register locations,
interrupt numbers and other system-specific configuration variants.
Unavoidably, we have to simultaneously convert all shdma users to provide
those resources.
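A sketch of the consuming side, i.e., roughly how a probe routine picks up
such resources through the generic platform API (the real shdma probe may
differ in detail):

	struct resource *chan_res, *dmars_res;
	int err_irq;

	chan_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmars_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); /* may be absent */
	err_irq = platform_get_irq(pdev, 0);	/* first IRQ resource: DMA error */
	if (!chan_res || err_irq < 0)
		return -ENODEV;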
Signed-off-by: Guennadi Liakhovetski Signed-off-by: Paul Mundt --- arch/sh/include/asm/dma-sh.h | 13 +- arch/sh/kernel/cpu/sh4a/setup-sh7722.c | 70 +++++++- arch/sh/kernel/cpu/sh4a/setup-sh7724.c | 159 ++++++++++++++++- arch/sh/kernel/cpu/sh4a/setup-sh7780.c | 113 +++++++++++- arch/sh/kernel/cpu/sh4a/setup-sh7785.c | 113 +++++++++++- drivers/dma/shdma.c | 318 ++++++++++++++++++++------------- drivers/dma/shdma.h | 6 +- 7 files changed, 636 insertions(+), 156 deletions(-) (limited to 'drivers/dma') diff --git a/arch/sh/include/asm/dma-sh.h b/arch/sh/include/asm/dma-sh.h index e934a2e6665..2e3631d6e27 100644 --- a/arch/sh/include/asm/dma-sh.h +++ b/arch/sh/include/asm/dma-sh.h @@ -154,10 +154,17 @@ struct sh_dmae_slave_config { char mid_rid; }; +struct sh_dmae_channel { + unsigned int offset; + unsigned int dmars; + unsigned int dmars_bit; +}; + struct sh_dmae_pdata { - unsigned int mode; - struct sh_dmae_slave_config *config; - int config_num; + struct sh_dmae_slave_config *slave; + int slave_num; + struct sh_dmae_channel *channel; + int channel_num; }; struct device; diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c index 538280a3dc6..aec182bed8a 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c @@ -75,15 +75,79 @@ static struct sh_dmae_slave_config sh7722_dmae_slaves[] = { }, }; +static struct sh_dmae_channel sh7722_dmae_channels[] = { + { + .offset = 0, + .dmars = 0, + .dmars_bit = 0, + }, { + .offset = 0x10, + .dmars = 0, + .dmars_bit = 8, + }, { + .offset = 0x20, + .dmars = 4, + .dmars_bit = 0, + }, { + .offset = 0x30, + .dmars = 4, + .dmars_bit = 8, + }, { + .offset = 0x50, + .dmars = 8, + .dmars_bit = 0, + }, { + .offset = 0x60, + .dmars = 8, + .dmars_bit = 8, + } +}; + static struct sh_dmae_pdata dma_platform_data = { - .mode = 0, - .config = sh7722_dmae_slaves, - .config_num = ARRAY_SIZE(sh7722_dmae_slaves), + .slave = sh7722_dmae_slaves, + .slave_num = ARRAY_SIZE(sh7722_dmae_slaves), + .channel = sh7722_dmae_channels, + .channel_num = ARRAY_SIZE(sh7722_dmae_channels), +}; + +static struct resource sh7722_dmae_resources[] = { + [0] = { + /* Channel registers and DMAOR */ + .start = 0xfe008020, + .end = 0xfe00808f, + .flags = IORESOURCE_MEM, + }, + [1] = { + /* DMARSx */ + .start = 0xfe009000, + .end = 0xfe00900b, + .flags = IORESOURCE_MEM, + }, + { + /* DMA error IRQ */ + .start = 78, + .end = 78, + .flags = IORESOURCE_IRQ, + }, + { + /* IRQ for channels 0-3 */ + .start = 48, + .end = 51, + .flags = IORESOURCE_IRQ, + }, + { + /* IRQ for channels 4-5 */ + .start = 76, + .end = 77, + .flags = IORESOURCE_IRQ, + }, }; struct platform_device dma_device = { .name = "sh-dma-engine", .id = -1, + .resource = sh7722_dmae_resources, + .num_resources = ARRAY_SIZE(sh7722_dmae_resources), .dev = { .platform_data = &dma_platform_data, }, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c index 31e3451f7e3..aca1fb2c571 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c @@ -28,15 +28,157 @@ #include /* DMA */ -static struct sh_dmae_pdata dma_platform_data = { - .mode = SHDMA_DMAOR1, +static struct sh_dmae_channel sh7724_dmae0_channels[] = { + { + .offset = 0, + .dmars = 0, + .dmars_bit = 0, + }, { + .offset = 0x10, + .dmars = 0, + .dmars_bit = 8, + }, { + .offset = 0x20, + .dmars = 4, + .dmars_bit = 0, + }, { + .offset = 0x30, + .dmars = 4, + .dmars_bit = 8, + }, { + .offset = 0x50, + .dmars = 8, + .dmars_bit = 0, + }, { + 
.offset = 0x60, + .dmars = 8, + .dmars_bit = 8, + } +}; + +static struct sh_dmae_channel sh7724_dmae1_channels[] = { + { + .offset = 0, + .dmars = 0, + .dmars_bit = 0, + }, { + .offset = 0x10, + .dmars = 0, + .dmars_bit = 8, + }, { + .offset = 0x20, + .dmars = 4, + .dmars_bit = 0, + }, { + .offset = 0x30, + .dmars = 4, + .dmars_bit = 8, + }, { + .offset = 0x50, + .dmars = 8, + .dmars_bit = 0, + }, { + .offset = 0x60, + .dmars = 8, + .dmars_bit = 8, + } +}; + +static struct sh_dmae_pdata dma0_platform_data = { + .channel = sh7724_dmae0_channels, + .channel_num = ARRAY_SIZE(sh7724_dmae0_channels), +}; + +static struct sh_dmae_pdata dma1_platform_data = { + .channel = sh7724_dmae1_channels, + .channel_num = ARRAY_SIZE(sh7724_dmae1_channels), +}; + +/* Resource order important! */ +static struct resource sh7724_dmae0_resources[] = { + { + /* Channel registers and DMAOR */ + .start = 0xfe008020, + .end = 0xfe00808f, + .flags = IORESOURCE_MEM, + }, + { + /* DMARSx */ + .start = 0xfe009000, + .end = 0xfe00900b, + .flags = IORESOURCE_MEM, + }, + { + /* DMA error IRQ */ + .start = 78, + .end = 78, + .flags = IORESOURCE_IRQ, + }, + { + /* IRQ for channels 0-3 */ + .start = 48, + .end = 51, + .flags = IORESOURCE_IRQ, + }, + { + /* IRQ for channels 4-5 */ + .start = 76, + .end = 77, + .flags = IORESOURCE_IRQ, + }, }; -static struct platform_device dma_device = { - .name = "sh-dma-engine", - .id = -1, - .dev = { - .platform_data = &dma_platform_data, +/* Resource order important! */ +static struct resource sh7724_dmae1_resources[] = { + { + /* Channel registers and DMAOR */ + .start = 0xfdc08020, + .end = 0xfdc0808f, + .flags = IORESOURCE_MEM, + }, + { + /* DMARSx */ + .start = 0xfdc09000, + .end = 0xfdc0900b, + .flags = IORESOURCE_MEM, + }, + { + /* DMA error IRQ */ + .start = 74, + .end = 74, + .flags = IORESOURCE_IRQ, + }, + { + /* IRQ for channels 0-3 */ + .start = 40, + .end = 43, + .flags = IORESOURCE_IRQ, + }, + { + /* IRQ for channels 4-5 */ + .start = 72, + .end = 73, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device dma0_device = { + .name = "sh-dma-engine", + .id = 0, + .resource = sh7724_dmae0_resources, + .num_resources = ARRAY_SIZE(sh7724_dmae0_resources), + .dev = { + .platform_data = &dma0_platform_data, + }, +}; + +static struct platform_device dma1_device = { + .name = "sh-dma-engine", + .id = 1, + .resource = sh7724_dmae1_resources, + .num_resources = ARRAY_SIZE(sh7724_dmae1_resources), + .dev = { + .platform_data = &dma1_platform_data, }, }; @@ -663,7 +805,8 @@ static struct platform_device *sh7724_devices[] __initdata = { &tmu3_device, &tmu4_device, &tmu5_device, - &dma_device, + &dma0_device, + &dma1_device, &rtc_device, &iic0_device, &iic1_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c index f8f21618d78..338dfc2c2bb 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c @@ -247,15 +247,115 @@ static struct platform_device rtc_device = { .resource = rtc_resources, }; -static struct sh_dmae_pdata dma_platform_data = { - .mode = (SHDMA_MIX_IRQ | SHDMA_DMAOR1), +/* DMA */ +static struct sh_dmae_channel sh7780_dmae0_channels[] = { + { + .offset = 0, + .dmars = 0, + .dmars_bit = 0, + }, { + .offset = 0x10, + .dmars = 0, + .dmars_bit = 8, + }, { + .offset = 0x20, + .dmars = 4, + .dmars_bit = 0, + }, { + .offset = 0x30, + .dmars = 4, + .dmars_bit = 8, + }, { + .offset = 0x50, + .dmars = 8, + .dmars_bit = 0, + }, { + .offset = 0x60, + .dmars = 8, + .dmars_bit = 8, + } +}; + 
+static struct sh_dmae_channel sh7780_dmae1_channels[] = { + { + .offset = 0, + }, { + .offset = 0x10, + }, { + .offset = 0x20, + }, { + .offset = 0x30, + }, { + .offset = 0x50, + }, { + .offset = 0x60, + } +}; + +static struct sh_dmae_pdata dma0_platform_data = { + .channel = sh7780_dmae0_channels, + .channel_num = ARRAY_SIZE(sh7780_dmae0_channels), +}; + +static struct sh_dmae_pdata dma1_platform_data = { + .channel = sh7780_dmae1_channels, + .channel_num = ARRAY_SIZE(sh7780_dmae1_channels), +}; + +static struct resource sh7780_dmae0_resources[] = { + [0] = { + /* Channel registers and DMAOR */ + .start = 0xfc808020, + .end = 0xfc80808f, + .flags = IORESOURCE_MEM, + }, + [1] = { + /* DMARSx */ + .start = 0xfc809000, + .end = 0xfc80900b, + .flags = IORESOURCE_MEM, + }, + { + /* Real DMA error IRQ is 38, and channel IRQs are 34-37, 44-45 */ + .start = 34, + .end = 34, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, + }, +}; + +static struct resource sh7780_dmae1_resources[] = { + [0] = { + /* Channel registers and DMAOR */ + .start = 0xfc818020, + .end = 0xfc81808f, + .flags = IORESOURCE_MEM, + }, + /* DMAC1 has no DMARS */ + { + /* Real DMA error IRQ is 38, and channel IRQs are 46-47, 92-95 */ + .start = 46, + .end = 46, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, + }, }; -static struct platform_device dma_device = { +static struct platform_device dma0_device = { .name = "sh-dma-engine", - .id = -1, + .id = 0, + .resource = sh7780_dmae0_resources, + .num_resources = ARRAY_SIZE(sh7780_dmae0_resources), .dev = { - .platform_data = &dma_platform_data, + .platform_data = &dma0_platform_data, + }, +}; + +static struct platform_device dma1_device = { + .name = "sh-dma-engine", + .id = 1, + .resource = sh7780_dmae1_resources, + .num_resources = ARRAY_SIZE(sh7780_dmae1_resources), + .dev = { + .platform_data = &dma1_platform_data, }, }; @@ -269,7 +369,8 @@ static struct platform_device *sh7780_devices[] __initdata = { &tmu4_device, &tmu5_device, &rtc_device, - &dma_device, + &dma0_device, + &dma1_device, }; static int __init sh7780_devices_setup(void) diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c index 23448d8c671..fbb5d1f51f1 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c @@ -295,15 +295,115 @@ static struct platform_device tmu5_device = { .num_resources = ARRAY_SIZE(tmu5_resources), }; -static struct sh_dmae_pdata dma_platform_data = { - .mode = (SHDMA_MIX_IRQ | SHDMA_DMAOR1), +/* DMA */ +static struct sh_dmae_channel sh7785_dmae0_channels[] = { + { + .offset = 0, + .dmars = 0, + .dmars_bit = 0, + }, { + .offset = 0x10, + .dmars = 0, + .dmars_bit = 8, + }, { + .offset = 0x20, + .dmars = 4, + .dmars_bit = 0, + }, { + .offset = 0x30, + .dmars = 4, + .dmars_bit = 8, + }, { + .offset = 0x50, + .dmars = 8, + .dmars_bit = 0, + }, { + .offset = 0x60, + .dmars = 8, + .dmars_bit = 8, + } +}; + +static struct sh_dmae_channel sh7785_dmae1_channels[] = { + { + .offset = 0, + }, { + .offset = 0x10, + }, { + .offset = 0x20, + }, { + .offset = 0x30, + }, { + .offset = 0x50, + }, { + .offset = 0x60, + } +}; + +static struct sh_dmae_pdata dma0_platform_data = { + .channel = sh7785_dmae0_channels, + .channel_num = ARRAY_SIZE(sh7785_dmae0_channels), +}; + +static struct sh_dmae_pdata dma1_platform_data = { + .channel = sh7785_dmae1_channels, + .channel_num = ARRAY_SIZE(sh7785_dmae1_channels), +}; + +static struct resource sh7785_dmae0_resources[] = { + [0] = { + /* Channel registers and DMAOR */ + 
.start = 0xfc808020, + .end = 0xfc80808f, + .flags = IORESOURCE_MEM, + }, + [1] = { + /* DMARSx */ + .start = 0xfc809000, + .end = 0xfc80900b, + .flags = IORESOURCE_MEM, + }, + { + /* Real DMA error IRQ is 39, and channel IRQs are 33-38 */ + .start = 33, + .end = 33, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, + }, +}; + +static struct resource sh7785_dmae1_resources[] = { + [0] = { + /* Channel registers and DMAOR */ + .start = 0xfcc08020, + .end = 0xfcc0808f, + .flags = IORESOURCE_MEM, + }, + /* DMAC1 has no DMARS */ + { + /* Real DMA error IRQ is 58, and channel IRQs are 52-57 */ + .start = 52, + .end = 52, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, + }, }; -static struct platform_device dma_device = { +static struct platform_device dma0_device = { .name = "sh-dma-engine", - .id = -1, + .id = 0, + .resource = sh7785_dmae0_resources, + .num_resources = ARRAY_SIZE(sh7785_dmae0_resources), .dev = { - .platform_data = &dma_platform_data, + .platform_data = &dma0_platform_data, + }, +}; + +static struct platform_device dma1_device = { + .name = "sh-dma-engine", + .id = 1, + .resource = sh7785_dmae1_resources, + .num_resources = ARRAY_SIZE(sh7785_dmae1_resources), + .dev = { + .platform_data = &dma1_platform_data, }, }; @@ -320,7 +420,8 @@ static struct platform_device *sh7785_devices[] __initdata = { &tmu3_device, &tmu4_device, &tmu5_device, - &dma_device, + &dma0_device, + &dma1_device, }; static int __init sh7785_devices_setup(void) diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index 77311698c04..ab12fa5a129 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c @@ -53,15 +53,24 @@ static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)]; static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); -#define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id]) static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) { - ctrl_outl(data, SH_DMAC_CHAN_BASE(sh_dc->id) + reg); + __raw_writel(data, sh_dc->base + reg / sizeof(u32)); } static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) { - return ctrl_inl(SH_DMAC_CHAN_BASE(sh_dc->id) + reg); + return __raw_readl(sh_dc->base + reg / sizeof(u32)); +} + +static u16 dmaor_read(struct sh_dmae_device *shdev) +{ + return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32)); +} + +static void dmaor_write(struct sh_dmae_device *shdev, u16 data) +{ + __raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32)); } /* @@ -69,23 +78,22 @@ static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) * * SH7780 has two DMAOR register */ -static void sh_dmae_ctl_stop(int id) +static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev) { - unsigned short dmaor = dmaor_read_reg(id); + unsigned short dmaor = dmaor_read(shdev); - dmaor &= ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME); - dmaor_write_reg(id, dmaor); + dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME)); } -static int sh_dmae_rst(int id) +static int sh_dmae_rst(struct sh_dmae_device *shdev) { unsigned short dmaor; - sh_dmae_ctl_stop(id); - dmaor = dmaor_read_reg(id) | DMAOR_INIT; + sh_dmae_ctl_stop(shdev); + dmaor = dmaor_read(shdev) | DMAOR_INIT; - dmaor_write_reg(id, dmaor); - if (dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF)) { + dmaor_write(shdev, dmaor); + if (dmaor_read(shdev) & (DMAOR_AE | DMAOR_NMIF)) { pr_warning("dma-sh: Can't initialize DMAOR.\n"); return -EINVAL; } @@ -153,31 +161,20 @@ static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) return 0; } -#define DMARS_SHIFT 8 -#define DMARS_CHAN_MSK 
0x01 static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) { - u32 addr; - int shift = 0; + struct sh_dmae_device *shdev = container_of(sh_chan->common.device, + struct sh_dmae_device, common); + struct sh_dmae_pdata *pdata = shdev->pdata; + struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id]; + u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16); + int shift = chan_pdata->dmars_bit; if (dmae_is_busy(sh_chan)) return -EBUSY; - if (sh_chan->id & DMARS_CHAN_MSK) - shift = DMARS_SHIFT; - - if (sh_chan->id < 6) - /* DMA0RS0 - DMA0RS2 */ - addr = SH_DMARS_BASE0 + (sh_chan->id / 2) * 4; -#ifdef SH_DMARS_BASE1 - else if (sh_chan->id < 12) - /* DMA1RS0 - DMA1RS2 */ - addr = SH_DMARS_BASE1 + ((sh_chan->id - 6) / 2) * 4; -#endif - else - return -EINVAL; - - ctrl_outw((val << shift) | (ctrl_inw(addr) & (0xFF00 >> shift)), addr); + __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift), + addr); return 0; } @@ -251,15 +248,15 @@ static struct sh_dmae_slave_config *sh_dmae_find_slave( struct dma_device *dma_dev = sh_chan->common.device; struct sh_dmae_device *shdev = container_of(dma_dev, struct sh_dmae_device, common); - struct sh_dmae_pdata *pdata = &shdev->pdata; + struct sh_dmae_pdata *pdata = shdev->pdata; int i; if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER) return NULL; - for (i = 0; i < pdata->config_num; i++) - if (pdata->config[i].slave_id == slave_id) - return pdata->config + i; + for (i = 0; i < pdata->slave_num; i++) + if (pdata->slave[i].slave_id == slave_id) + return pdata->slave + i; return NULL; } @@ -757,9 +754,7 @@ static irqreturn_t sh_dmae_err(int irq, void *data) int i; /* halt the dma controller */ - sh_dmae_ctl_stop(0); - if (shdev->pdata.mode & SHDMA_DMAOR1) - sh_dmae_ctl_stop(1); + sh_dmae_ctl_stop(shdev); /* We cannot detect, which channel caused the error, have to reset all */ for (i = 0; i < MAX_DMA_CHANNELS; i++) { @@ -778,9 +773,7 @@ static irqreturn_t sh_dmae_err(int irq, void *data) list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free); } } - sh_dmae_rst(0); - if (shdev->pdata.mode & SHDMA_DMAOR1) - sh_dmae_rst(1); + sh_dmae_rst(shdev); return IRQ_HANDLED; } @@ -813,19 +806,12 @@ static void dmae_do_tasklet(unsigned long data) sh_dmae_chan_ld_cleanup(sh_chan, false); } -static unsigned int get_dmae_irq(unsigned int id) -{ - unsigned int irq = 0; - if (id < ARRAY_SIZE(dmte_irq_map)) - irq = dmte_irq_map[id]; - return irq; -} - -static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id) +static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, + int irq, unsigned long flags) { int err; - unsigned int irq = get_dmae_irq(id); - unsigned long irqflags = IRQF_DISABLED; + struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id]; + struct platform_device *pdev = to_platform_device(shdev->common.dev); struct sh_dmae_chan *new_sh_chan; /* alloc channel */ @@ -838,6 +824,8 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id) new_sh_chan->dev = shdev->common.dev; new_sh_chan->id = id; + new_sh_chan->irq = irq; + new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32); /* Init DMA tasklet */ tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet, @@ -860,21 +848,15 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id) &shdev->common.channels); shdev->common.chancnt++; - if (shdev->pdata.mode & SHDMA_MIX_IRQ) { - irqflags = IRQF_SHARED; -#if defined(DMTE6_IRQ) - if (irq >= DMTE6_IRQ) - irq = DMTE6_IRQ; - else -#endif 
- irq = DMTE0_IRQ; - } - - snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), - "sh-dmae%d", new_sh_chan->id); + if (pdev->id >= 0) + snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), + "sh-dmae%d.%d", pdev->id, new_sh_chan->id); + else + snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), + "sh-dma%d", new_sh_chan->id); /* set up channel irq */ - err = request_irq(irq, &sh_dmae_interrupt, irqflags, + err = request_irq(irq, &sh_dmae_interrupt, flags, new_sh_chan->dev_id, new_sh_chan); if (err) { dev_err(shdev->common.dev, "DMA channel %d request_irq error " @@ -898,12 +880,12 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev) for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) { if (shdev->chan[i]) { - struct sh_dmae_chan *shchan = shdev->chan[i]; - if (!(shdev->pdata.mode & SHDMA_MIX_IRQ)) - free_irq(dmte_irq_map[i], shchan); + struct sh_dmae_chan *sh_chan = shdev->chan[i]; + + free_irq(sh_chan->irq, sh_chan); - list_del(&shchan->common.device_node); - kfree(shchan); + list_del(&sh_chan->common.device_node); + kfree(sh_chan); shdev->chan[i] = NULL; } } @@ -912,47 +894,81 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev) static int __init sh_dmae_probe(struct platform_device *pdev) { - int err = 0, cnt, ecnt; - unsigned long irqflags = IRQF_DISABLED; -#if defined(CONFIG_CPU_SH4) - int eirq[] = { DMAE0_IRQ, -#if defined(DMAE1_IRQ) - DMAE1_IRQ -#endif - }; -#endif + struct sh_dmae_pdata *pdata = pdev->dev.platform_data; + unsigned long irqflags = IRQF_DISABLED, + chan_flag[MAX_DMA_CHANNELS] = {}; + int errirq, chan_irq[MAX_DMA_CHANNELS]; + int err, i, irq_cnt = 0, irqres = 0; struct sh_dmae_device *shdev; + struct resource *chan, *dmars, *errirq_res, *chanirq_res; /* get platform data */ - if (!pdev->dev.platform_data) + if (!pdata || !pdata->channel_num) return -ENODEV; + chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); + /* DMARS area is optional, if absent, this controller cannot do slave DMA */ + dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1); + /* + * IRQ resources: + * 1. there always must be at least one IRQ IO-resource. On SH4 it is + * the error IRQ, in which case it is the only IRQ in this resource: + * start == end. If it is the only IRQ resource, all channels also + * use the same IRQ. + * 2. DMA channel IRQ resources can be specified one per resource or in + * ranges (start != end) + * 3. iff all events (channels and, optionally, error) on this + * controller use the same IRQ, only one IRQ resource can be + * specified, otherwise there must be one IRQ per channel, even if + * some of them are equal + * 4. 
if all IRQs on this controller are equal or if some specific IRQs + * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be + * requested with the IRQF_SHARED flag + */ + errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!chan || !errirq_res) + return -ENODEV; + + if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) { + dev_err(&pdev->dev, "DMAC register region already claimed\n"); + return -EBUSY; + } + + if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) { + dev_err(&pdev->dev, "DMAC DMARS region already claimed\n"); + err = -EBUSY; + goto ermrdmars; + } + + err = -ENOMEM; shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL); if (!shdev) { - dev_err(&pdev->dev, "No enough memory\n"); - return -ENOMEM; + dev_err(&pdev->dev, "Not enough memory\n"); + goto ealloc; + } + + shdev->chan_reg = ioremap(chan->start, resource_size(chan)); + if (!shdev->chan_reg) + goto emapchan; + if (dmars) { + shdev->dmars = ioremap(dmars->start, resource_size(dmars)); + if (!shdev->dmars) + goto emapdmars; } /* platform data */ - memcpy(&shdev->pdata, pdev->dev.platform_data, - sizeof(struct sh_dmae_pdata)); + shdev->pdata = pdata; /* reset dma controller */ - err = sh_dmae_rst(0); + err = sh_dmae_rst(shdev); if (err) goto rst_err; - /* SH7780/85/23 has DMAOR1 */ - if (shdev->pdata.mode & SHDMA_DMAOR1) { - err = sh_dmae_rst(1); - if (err) - goto rst_err; - } - INIT_LIST_HEAD(&shdev->common.channels); dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); - dma_cap_set(DMA_SLAVE, shdev->common.cap_mask); + if (dmars) + dma_cap_set(DMA_SLAVE, shdev->common.cap_mask); shdev->common.device_alloc_chan_resources = sh_dmae_alloc_chan_resources; @@ -970,30 +986,63 @@ static int __init sh_dmae_probe(struct platform_device *pdev) shdev->common.copy_align = 5; #if defined(CONFIG_CPU_SH4) - /* Non Mix IRQ mode SH7722/SH7730 etc... 
*/ - if (shdev->pdata.mode & SHDMA_MIX_IRQ) { + chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1); + + if (!chanirq_res) + chanirq_res = errirq_res; + else + irqres++; + + if (chanirq_res == errirq_res || + (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE) irqflags = IRQF_SHARED; - eirq[0] = DMTE0_IRQ; -#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ) - eirq[1] = DMTE6_IRQ; -#endif + + errirq = errirq_res->start; + + err = request_irq(errirq, sh_dmae_err, irqflags, + "DMAC Address Error", shdev); + if (err) { + dev_err(&pdev->dev, + "DMA failed requesting irq #%d, error %d\n", + errirq, err); + goto eirq_err; } - for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) { - err = request_irq(eirq[ecnt], sh_dmae_err, irqflags, - "DMAC Address Error", shdev); - if (err) { - dev_err(&pdev->dev, "DMA device request_irq" - "error (irq %d) with return %d\n", - eirq[ecnt], err); - goto eirq_err; +#else + chanirq_res = errirq_res; +#endif /* CONFIG_CPU_SH4 */ + + if (chanirq_res->start == chanirq_res->end && + !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) { + /* Special case - all multiplexed */ + for (; irq_cnt < pdata->channel_num; irq_cnt++) { + chan_irq[irq_cnt] = chanirq_res->start; + chan_flag[irq_cnt] = IRQF_SHARED; } + } else { + do { + for (i = chanirq_res->start; i <= chanirq_res->end; i++) { + if ((errirq_res->flags & IORESOURCE_BITS) == + IORESOURCE_IRQ_SHAREABLE) + chan_flag[irq_cnt] = IRQF_SHARED; + else + chan_flag[irq_cnt] = IRQF_DISABLED; + dev_dbg(&pdev->dev, + "Found IRQ %d for channel %d\n", + i, irq_cnt); + chan_irq[irq_cnt++] = i; + } + chanirq_res = platform_get_resource(pdev, + IORESOURCE_IRQ, ++irqres); + } while (irq_cnt < pdata->channel_num && chanirq_res); } -#endif /* CONFIG_CPU_SH4 */ + + if (irq_cnt < pdata->channel_num) + goto eirqres; /* Create DMA Channel */ - for (cnt = 0 ; cnt < MAX_DMA_CHANNELS ; cnt++) { - err = sh_dmae_chan_probe(shdev, cnt); + for (i = 0; i < pdata->channel_num; i++) { + err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]); if (err) goto chan_probe_err; } @@ -1005,13 +1054,23 @@ static int __init sh_dmae_probe(struct platform_device *pdev) chan_probe_err: sh_dmae_chan_remove(shdev); - +eirqres: +#if defined(CONFIG_CPU_SH4) + free_irq(errirq, shdev); eirq_err: - for (ecnt-- ; ecnt >= 0; ecnt--) - free_irq(eirq[ecnt], shdev); - +#endif rst_err: + if (dmars) + iounmap(shdev->dmars); +emapdmars: + iounmap(shdev->chan_reg); +emapchan: kfree(shdev); +ealloc: + if (dmars) + release_mem_region(dmars->start, resource_size(dmars)); +ermrdmars: + release_mem_region(chan->start, resource_size(chan)); return err; } @@ -1019,36 +1078,37 @@ rst_err: static int __exit sh_dmae_remove(struct platform_device *pdev) { struct sh_dmae_device *shdev = platform_get_drvdata(pdev); + struct resource *res; + int errirq = platform_get_irq(pdev, 0); dma_async_device_unregister(&shdev->common); - if (shdev->pdata.mode & SHDMA_MIX_IRQ) { - free_irq(DMTE0_IRQ, shdev); -#if defined(DMTE6_IRQ) - free_irq(DMTE6_IRQ, shdev); -#endif - } + if (errirq > 0) + free_irq(errirq, shdev); /* channel data remove */ sh_dmae_chan_remove(shdev); - if (!(shdev->pdata.mode & SHDMA_MIX_IRQ)) { - free_irq(DMAE0_IRQ, shdev); -#if defined(DMAE1_IRQ) - free_irq(DMAE1_IRQ, shdev); -#endif - } + if (shdev->dmars) + iounmap(shdev->dmars); + iounmap(shdev->chan_reg); + kfree(shdev); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res) + release_mem_region(res->start, resource_size(res)); + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (res) + 
release_mem_region(res->start, resource_size(res)); + return 0; } static void sh_dmae_shutdown(struct platform_device *pdev) { struct sh_dmae_device *shdev = platform_get_drvdata(pdev); - sh_dmae_ctl_stop(0); - if (shdev->pdata.mode & SHDMA_DMAOR1) - sh_dmae_ctl_stop(1); + sh_dmae_ctl_stop(shdev); } static struct platform_driver sh_dmae_driver = { diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h index 7e227f3c87c..800fd884be8 100644 --- a/drivers/dma/shdma.h +++ b/drivers/dma/shdma.h @@ -47,14 +47,18 @@ struct sh_dmae_chan { struct tasklet_struct tasklet; /* Tasklet */ int descs_allocated; /* desc count */ int xmit_shift; /* log_2(bytes_per_xfer) */ + int irq; int id; /* Raw id of this channel */ + u32 __iomem *base; char dev_id[16]; /* unique name per DMAC of channel */ }; struct sh_dmae_device { struct dma_device common; struct sh_dmae_chan *chan[MAX_DMA_CHANNELS]; - struct sh_dmae_pdata pdata; + struct sh_dmae_pdata *pdata; + u32 __iomem *chan_reg; + u16 __iomem *dmars; }; #define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common) -- cgit v1.2.3-70-g09d2 From 8b1935e6a36b0967efc593d67ed3aebbfbc1f5b1 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Thu, 11 Feb 2010 16:50:14 +0000 Subject: dmaengine: shdma: separate DMA headers. Separate SH DMA headers into ones, commonly used by both drivers, and ones, specific to each of them. This will make the future development of the dmaengine driver easier. Signed-off-by: Guennadi Liakhovetski Acked-by: Mark Brown Signed-off-by: Paul Mundt --- arch/sh/include/asm/dma-register.h | 51 +++++++++++++ arch/sh/include/asm/dma-sh.h | 95 +---------------------- arch/sh/include/asm/dmaengine.h | 73 ++++++++++++++++++ arch/sh/include/asm/siu.h | 2 +- arch/sh/include/cpu-sh3/cpu/dma-register.h | 41 ++++++++++ arch/sh/include/cpu-sh3/cpu/dma.h | 27 ------- arch/sh/include/cpu-sh4/cpu/dma-register.h | 116 +++++++++++++++++++++++++++++ arch/sh/include/cpu-sh4/cpu/dma-sh4a.h | 62 --------------- arch/sh/include/cpu-sh4/cpu/dma.h | 36 +-------- arch/sh/kernel/cpu/sh4a/setup-sh7722.c | 20 ++++- arch/sh/kernel/cpu/sh4a/setup-sh7724.c | 21 +++++- arch/sh/kernel/cpu/sh4a/setup-sh7780.c | 21 +++++- arch/sh/kernel/cpu/sh4a/setup-sh7785.c | 21 +++++- drivers/dma/shdma.c | 79 ++++++++++++-------- drivers/dma/shdma.h | 4 +- sound/soc/sh/siu.h | 2 +- sound/soc/sh/siu_pcm.c | 2 +- 17 files changed, 415 insertions(+), 258 deletions(-) create mode 100644 arch/sh/include/asm/dma-register.h create mode 100644 arch/sh/include/asm/dmaengine.h create mode 100644 arch/sh/include/cpu-sh3/cpu/dma-register.h create mode 100644 arch/sh/include/cpu-sh4/cpu/dma-register.h (limited to 'drivers/dma') diff --git a/arch/sh/include/asm/dma-register.h b/arch/sh/include/asm/dma-register.h new file mode 100644 index 00000000000..51cd78feacf --- /dev/null +++ b/arch/sh/include/asm/dma-register.h @@ -0,0 +1,51 @@ +/* + * Common header for the legacy SH DMA driver and the new dmaengine driver + * + * extracted from arch/sh/include/asm/dma-sh.h: + * + * Copyright (C) 2000 Takashi YOSHII + * Copyright (C) 2003 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
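+ *
+ * These are byte offsets into a channel's register block. The drivers keep
+ * their base pointers as u32 __iomem *, so a byte offset is scaled before
+ * use; the access pattern in shdma.c, for example, is:
+ *
+ *	__raw_writel(data, base + CHCR / sizeof(u32));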
+ */ +#ifndef DMA_REGISTER_H +#define DMA_REGISTER_H + +/* DMA register */ +#define SAR 0x00 +#define DAR 0x04 +#define TCR 0x08 +#define CHCR 0x0C +#define DMAOR 0x40 + +/* DMAOR definitions */ +#define DMAOR_AE 0x00000004 +#define DMAOR_NMIF 0x00000002 +#define DMAOR_DME 0x00000001 + +/* Definitions for the SuperH DMAC */ +#define REQ_L 0x00000000 +#define REQ_E 0x00080000 +#define RACK_H 0x00000000 +#define RACK_L 0x00040000 +#define ACK_R 0x00000000 +#define ACK_W 0x00020000 +#define ACK_H 0x00000000 +#define ACK_L 0x00010000 +#define DM_INC 0x00004000 +#define DM_DEC 0x00008000 +#define DM_FIX 0x0000c000 +#define SM_INC 0x00001000 +#define SM_DEC 0x00002000 +#define SM_FIX 0x00003000 +#define RS_IN 0x00000200 +#define RS_OUT 0x00000300 +#define TS_BLK 0x00000040 +#define TM_BUR 0x00000020 +#define CHCR_DE 0x00000001 +#define CHCR_TE 0x00000002 +#define CHCR_IE 0x00000004 + +#endif diff --git a/arch/sh/include/asm/dma-sh.h b/arch/sh/include/asm/dma-sh.h index 2e3631d6e27..f3acb8e34c6 100644 --- a/arch/sh/include/asm/dma-sh.h +++ b/arch/sh/include/asm/dma-sh.h @@ -11,7 +11,8 @@ #ifndef __DMA_SH_H #define __DMA_SH_H -#include +#include +#include #include /* DMAOR contorl: The DMAOR access size is different by CPU.*/ @@ -53,34 +54,6 @@ static int dmte_irq_map[] __maybe_unused = { #endif }; -/* Definitions for the SuperH DMAC */ -#define REQ_L 0x00000000 -#define REQ_E 0x00080000 -#define RACK_H 0x00000000 -#define RACK_L 0x00040000 -#define ACK_R 0x00000000 -#define ACK_W 0x00020000 -#define ACK_H 0x00000000 -#define ACK_L 0x00010000 -#define DM_INC 0x00004000 -#define DM_DEC 0x00008000 -#define DM_FIX 0x0000c000 -#define SM_INC 0x00001000 -#define SM_DEC 0x00002000 -#define SM_FIX 0x00003000 -#define RS_IN 0x00000200 -#define RS_OUT 0x00000300 -#define TS_BLK 0x00000040 -#define TM_BUR 0x00000020 -#define CHCR_DE 0x00000001 -#define CHCR_TE 0x00000002 -#define CHCR_IE 0x00000004 - -/* DMAOR definitions */ -#define DMAOR_AE 0x00000004 -#define DMAOR_NMIF 0x00000002 -#define DMAOR_DME 0x00000001 - /* * Define the default configuration for dual address memory-memory transfer. * The 0x400 value represents auto-request, external->external. 
@@ -111,68 +84,4 @@ static u32 dma_base_addr[] __maybe_unused = { #endif }; -/* DMA register */ -#define SAR 0x00 -#define DAR 0x04 -#define TCR 0x08 -#define CHCR 0x0C -#define DMAOR 0x40 - -/* - * for dma engine - * - * SuperH DMA mode - */ -#define SHDMA_MIX_IRQ (1 << 1) -#define SHDMA_DMAOR1 (1 << 2) -#define SHDMA_DMAE1 (1 << 3) - -enum sh_dmae_slave_chan_id { - SHDMA_SLAVE_SCIF0_TX, - SHDMA_SLAVE_SCIF0_RX, - SHDMA_SLAVE_SCIF1_TX, - SHDMA_SLAVE_SCIF1_RX, - SHDMA_SLAVE_SCIF2_TX, - SHDMA_SLAVE_SCIF2_RX, - SHDMA_SLAVE_SCIF3_TX, - SHDMA_SLAVE_SCIF3_RX, - SHDMA_SLAVE_SCIF4_TX, - SHDMA_SLAVE_SCIF4_RX, - SHDMA_SLAVE_SCIF5_TX, - SHDMA_SLAVE_SCIF5_RX, - SHDMA_SLAVE_SIUA_TX, - SHDMA_SLAVE_SIUA_RX, - SHDMA_SLAVE_SIUB_TX, - SHDMA_SLAVE_SIUB_RX, - SHDMA_SLAVE_NUMBER, /* Must stay last */ -}; - -struct sh_dmae_slave_config { - enum sh_dmae_slave_chan_id slave_id; - dma_addr_t addr; - u32 chcr; - char mid_rid; -}; - -struct sh_dmae_channel { - unsigned int offset; - unsigned int dmars; - unsigned int dmars_bit; -}; - -struct sh_dmae_pdata { - struct sh_dmae_slave_config *slave; - int slave_num; - struct sh_dmae_channel *channel; - int channel_num; -}; - -struct device; - -struct sh_dmae_slave { - enum sh_dmae_slave_chan_id slave_id; /* Set by the platform */ - struct device *dma_dev; /* Set by the platform */ - struct sh_dmae_slave_config *config; /* Set by the driver */ -}; - #endif /* __DMA_SH_H */ diff --git a/arch/sh/include/asm/dmaengine.h b/arch/sh/include/asm/dmaengine.h new file mode 100644 index 00000000000..9586e4a482b --- /dev/null +++ b/arch/sh/include/asm/dmaengine.h @@ -0,0 +1,73 @@ +/* + * Header for the new SH dmaengine driver + * + * Copyright (C) 2010 Guennadi Liakhovetski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
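+ *
+ * Everything a platform needs to describe its DMA controllers to the
+ * dmaengine driver lives here: the slave ID enumeration, per-channel
+ * register offsets and DMARS positions, and the ts_* fields describing
+ * where the transfer-size bits sit in CHCR on that CPU.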
+ */ +#ifndef ASM_DMAENGINE_H +#define ASM_DMAENGINE_H + +#include + +#define SH_DMAC_MAX_CHANNELS 6 + +enum sh_dmae_slave_chan_id { + SHDMA_SLAVE_SCIF0_TX, + SHDMA_SLAVE_SCIF0_RX, + SHDMA_SLAVE_SCIF1_TX, + SHDMA_SLAVE_SCIF1_RX, + SHDMA_SLAVE_SCIF2_TX, + SHDMA_SLAVE_SCIF2_RX, + SHDMA_SLAVE_SCIF3_TX, + SHDMA_SLAVE_SCIF3_RX, + SHDMA_SLAVE_SCIF4_TX, + SHDMA_SLAVE_SCIF4_RX, + SHDMA_SLAVE_SCIF5_TX, + SHDMA_SLAVE_SCIF5_RX, + SHDMA_SLAVE_SIUA_TX, + SHDMA_SLAVE_SIUA_RX, + SHDMA_SLAVE_SIUB_TX, + SHDMA_SLAVE_SIUB_RX, + SHDMA_SLAVE_NUMBER, /* Must stay last */ +}; + +struct sh_dmae_slave_config { + enum sh_dmae_slave_chan_id slave_id; + dma_addr_t addr; + u32 chcr; + char mid_rid; +}; + +struct sh_dmae_channel { + unsigned int offset; + unsigned int dmars; + unsigned int dmars_bit; +}; + +struct sh_dmae_pdata { + struct sh_dmae_slave_config *slave; + int slave_num; + struct sh_dmae_channel *channel; + int channel_num; + unsigned int ts_low_shift; + unsigned int ts_low_mask; + unsigned int ts_high_shift; + unsigned int ts_high_mask; + unsigned int *ts_shift; + int ts_shift_num; + u16 dmaor_init; +}; + +struct device; + +/* Used by slave DMA clients to request DMA to/from a specific peripheral */ +struct sh_dmae_slave { + enum sh_dmae_slave_chan_id slave_id; /* Set by the platform */ + struct device *dma_dev; /* Set by the platform */ + struct sh_dmae_slave_config *config; /* Set by the driver */ +}; + +#endif diff --git a/arch/sh/include/asm/siu.h b/arch/sh/include/asm/siu.h index 57565a3b551..f1b1e6944a5 100644 --- a/arch/sh/include/asm/siu.h +++ b/arch/sh/include/asm/siu.h @@ -11,7 +11,7 @@ #ifndef ASM_SIU_H #define ASM_SIU_H -#include +#include struct device; diff --git a/arch/sh/include/cpu-sh3/cpu/dma-register.h b/arch/sh/include/cpu-sh3/cpu/dma-register.h new file mode 100644 index 00000000000..2349e488c9a --- /dev/null +++ b/arch/sh/include/cpu-sh3/cpu/dma-register.h @@ -0,0 +1,41 @@ +/* + * SH3 CPU-specific DMA definitions, used by both DMA drivers + * + * Copyright (C) 2010 Guennadi Liakhovetski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef CPU_DMA_REGISTER_H +#define CPU_DMA_REGISTER_H + +#define CHCR_TS_LOW_MASK 0x18 +#define CHCR_TS_LOW_SHIFT 3 +#define CHCR_TS_HIGH_MASK 0 +#define CHCR_TS_HIGH_SHIFT 0 + +#define DMAOR_INIT DMAOR_DME + +/* + * The SuperH DMAC supports a number of transmit sizes, we list them here, + * with their respective values as they appear in the CHCR registers. + */ +enum { + XMIT_SZ_8BIT, + XMIT_SZ_16BIT, + XMIT_SZ_32BIT, + XMIT_SZ_128BIT, +}; + +/* log2(size / 8) - used to calculate number of transfers */ +#define TS_SHIFT { \ + [XMIT_SZ_8BIT] = 0, \ + [XMIT_SZ_16BIT] = 1, \ + [XMIT_SZ_32BIT] = 2, \ + [XMIT_SZ_128BIT] = 4, \ +} + +#define TS_INDEX2VAL(i) (((i) & 3) << CHCR_TS_LOW_SHIFT) + +#endif diff --git a/arch/sh/include/cpu-sh3/cpu/dma.h b/arch/sh/include/cpu-sh3/cpu/dma.h index 207811a7a65..24e28b91c9d 100644 --- a/arch/sh/include/cpu-sh3/cpu/dma.h +++ b/arch/sh/include/cpu-sh3/cpu/dma.h @@ -20,31 +20,4 @@ #define TS_32 0x00000010 #define TS_128 0x00000018 -#define CHCR_TS_LOW_MASK 0x18 -#define CHCR_TS_LOW_SHIFT 3 -#define CHCR_TS_HIGH_MASK 0 -#define CHCR_TS_HIGH_SHIFT 0 - -#define DMAOR_INIT DMAOR_DME - -/* - * The SuperH DMAC supports a number of transmit sizes, we list them here, - * with their respective values as they appear in the CHCR registers. 
- */ -enum { - XMIT_SZ_8BIT, - XMIT_SZ_16BIT, - XMIT_SZ_32BIT, - XMIT_SZ_128BIT, -}; - -#define TS_SHIFT { \ - [XMIT_SZ_8BIT] = 0, \ - [XMIT_SZ_16BIT] = 1, \ - [XMIT_SZ_32BIT] = 2, \ - [XMIT_SZ_128BIT] = 4, \ -} - -#define TS_INDEX2VAL(i) (((i) & 3) << CHCR_TS_LOW_SHIFT) - #endif /* __ASM_CPU_SH3_DMA_H */ diff --git a/arch/sh/include/cpu-sh4/cpu/dma-register.h b/arch/sh/include/cpu-sh4/cpu/dma-register.h new file mode 100644 index 00000000000..008e7fc8f6c --- /dev/null +++ b/arch/sh/include/cpu-sh4/cpu/dma-register.h @@ -0,0 +1,116 @@ +/* + * SH4 CPU-specific DMA definitions, used by both DMA drivers + * + * Copyright (C) 2010 Guennadi Liakhovetski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef CPU_DMA_REGISTER_H +#define CPU_DMA_REGISTER_H + +/* SH7751/7760/7780 DMA IRQ sources */ + +#ifdef CONFIG_CPU_SH4A + +#define DMAOR_INIT DMAOR_DME + +#if defined(CONFIG_CPU_SUBTYPE_SH7343) || \ + defined(CONFIG_CPU_SUBTYPE_SH7730) +#define CHCR_TS_LOW_MASK 0x00000018 +#define CHCR_TS_LOW_SHIFT 3 +#define CHCR_TS_HIGH_MASK 0 +#define CHCR_TS_HIGH_SHIFT 0 +#elif defined(CONFIG_CPU_SUBTYPE_SH7722) +#define CHCR_TS_LOW_MASK 0x00000018 +#define CHCR_TS_LOW_SHIFT 3 +#define CHCR_TS_HIGH_MASK 0x00300000 +#define CHCR_TS_HIGH_SHIFT (20 - 2) /* 2 bits for shifted low TS */ +#elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \ + defined(CONFIG_CPU_SUBTYPE_SH7764) +#define CHCR_TS_LOW_MASK 0x00000018 +#define CHCR_TS_LOW_SHIFT 3 +#define CHCR_TS_HIGH_MASK 0 +#define CHCR_TS_HIGH_SHIFT 0 +#elif defined(CONFIG_CPU_SUBTYPE_SH7723) +#define CHCR_TS_LOW_MASK 0x00000018 +#define CHCR_TS_LOW_SHIFT 3 +#define CHCR_TS_HIGH_MASK 0 +#define CHCR_TS_HIGH_SHIFT 0 +#elif defined(CONFIG_CPU_SUBTYPE_SH7724) +#define CHCR_TS_LOW_MASK 0x00000018 +#define CHCR_TS_LOW_SHIFT 3 +#define CHCR_TS_HIGH_MASK 0x00600000 +#define CHCR_TS_HIGH_SHIFT (20 - 2) /* 2 bits for shifted low TS */ +#elif defined(CONFIG_CPU_SUBTYPE_SH7780) +#define CHCR_TS_LOW_MASK 0x00000018 +#define CHCR_TS_LOW_SHIFT 3 +#define CHCR_TS_HIGH_MASK 0 +#define CHCR_TS_HIGH_SHIFT 0 +#else /* SH7785 */ +#define CHCR_TS_LOW_MASK 0x00000018 +#define CHCR_TS_LOW_SHIFT 3 +#define CHCR_TS_HIGH_MASK 0 +#define CHCR_TS_HIGH_SHIFT 0 +#endif + +/* Transmit sizes and respective CHCR register values */ +enum { + XMIT_SZ_8BIT = 0, + XMIT_SZ_16BIT = 1, + XMIT_SZ_32BIT = 2, + XMIT_SZ_64BIT = 7, + XMIT_SZ_128BIT = 3, + XMIT_SZ_256BIT = 4, + XMIT_SZ_128BIT_BLK = 0xb, + XMIT_SZ_256BIT_BLK = 0xc, +}; + +/* log2(size / 8) - used to calculate number of transfers */ +#define TS_SHIFT { \ + [XMIT_SZ_8BIT] = 0, \ + [XMIT_SZ_16BIT] = 1, \ + [XMIT_SZ_32BIT] = 2, \ + [XMIT_SZ_64BIT] = 3, \ + [XMIT_SZ_128BIT] = 4, \ + [XMIT_SZ_256BIT] = 5, \ + [XMIT_SZ_128BIT_BLK] = 4, \ + [XMIT_SZ_256BIT_BLK] = 5, \ +} + +#define TS_INDEX2VAL(i) ((((i) & 3) << CHCR_TS_LOW_SHIFT) | \ + ((((i) >> 2) & 3) << CHCR_TS_HIGH_SHIFT)) + +#else /* CONFIG_CPU_SH4A */ + +#define DMAOR_INIT (0x8000 | DMAOR_DME) + +#define CHCR_TS_LOW_MASK 0x70 +#define CHCR_TS_LOW_SHIFT 4 +#define CHCR_TS_HIGH_MASK 0 +#define CHCR_TS_HIGH_SHIFT 0 + +/* Transmit sizes and respective CHCR register values */ +enum { + XMIT_SZ_8BIT = 1, + XMIT_SZ_16BIT = 2, + XMIT_SZ_32BIT = 3, + XMIT_SZ_64BIT = 0, + XMIT_SZ_256BIT = 4, +}; + +/* log2(size / 8) - used to calculate number of transfers */ +#define TS_SHIFT { \ + [XMIT_SZ_8BIT] = 0, \ + [XMIT_SZ_16BIT] = 1, \ + [XMIT_SZ_32BIT] = 2, \ + 
[XMIT_SZ_64BIT] = 3, \ + [XMIT_SZ_256BIT] = 5, \ +} + +#define TS_INDEX2VAL(i) (((i) & 7) << CHCR_TS_LOW_SHIFT) + +#endif /* CONFIG_CPU_SH4A */ + +#endif diff --git a/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h b/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h index e734ea47d8a..9647e681fd2 100644 --- a/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h +++ b/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h @@ -8,20 +8,12 @@ #define DMAE0_IRQ 78 /* DMA Error IRQ*/ #define SH_DMAC_BASE0 0xFE008020 #define SH_DMARS_BASE0 0xFE009000 -#define CHCR_TS_LOW_MASK 0x00000018 -#define CHCR_TS_LOW_SHIFT 3 -#define CHCR_TS_HIGH_MASK 0 -#define CHCR_TS_HIGH_SHIFT 0 #elif defined(CONFIG_CPU_SUBTYPE_SH7722) #define DMTE0_IRQ 48 #define DMTE4_IRQ 76 #define DMAE0_IRQ 78 /* DMA Error IRQ*/ #define SH_DMAC_BASE0 0xFE008020 #define SH_DMARS_BASE0 0xFE009000 -#define CHCR_TS_LOW_MASK 0x00000018 -#define CHCR_TS_LOW_SHIFT 3 -#define CHCR_TS_HIGH_MASK 0x00300000 -#define CHCR_TS_HIGH_SHIFT 20 #elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \ defined(CONFIG_CPU_SUBTYPE_SH7764) #define DMTE0_IRQ 34 @@ -29,10 +21,6 @@ #define DMAE0_IRQ 38 #define SH_DMAC_BASE0 0xFF608020 #define SH_DMARS_BASE0 0xFF609000 -#define CHCR_TS_LOW_MASK 0x00000018 -#define CHCR_TS_LOW_SHIFT 3 -#define CHCR_TS_HIGH_MASK 0 -#define CHCR_TS_HIGH_SHIFT 0 #elif defined(CONFIG_CPU_SUBTYPE_SH7723) #define DMTE0_IRQ 48 /* DMAC0A*/ #define DMTE4_IRQ 76 /* DMAC0B */ @@ -46,10 +34,6 @@ #define SH_DMAC_BASE0 0xFE008020 #define SH_DMAC_BASE1 0xFDC08020 #define SH_DMARS_BASE0 0xFDC09000 -#define CHCR_TS_LOW_MASK 0x00000018 -#define CHCR_TS_LOW_SHIFT 3 -#define CHCR_TS_HIGH_MASK 0 -#define CHCR_TS_HIGH_SHIFT 0 #elif defined(CONFIG_CPU_SUBTYPE_SH7724) #define DMTE0_IRQ 48 /* DMAC0A*/ #define DMTE4_IRQ 76 /* DMAC0B */ @@ -64,10 +48,6 @@ #define SH_DMAC_BASE1 0xFDC08020 #define SH_DMARS_BASE0 0xFE009000 #define SH_DMARS_BASE1 0xFDC09000 -#define CHCR_TS_LOW_MASK 0x00000018 -#define CHCR_TS_LOW_SHIFT 3 -#define CHCR_TS_HIGH_MASK 0x00600000 -#define CHCR_TS_HIGH_SHIFT 21 #elif defined(CONFIG_CPU_SUBTYPE_SH7780) #define DMTE0_IRQ 34 #define DMTE4_IRQ 44 @@ -80,10 +60,6 @@ #define SH_DMAC_BASE0 0xFC808020 #define SH_DMAC_BASE1 0xFC818020 #define SH_DMARS_BASE0 0xFC809000 -#define CHCR_TS_LOW_MASK 0x00000018 -#define CHCR_TS_LOW_SHIFT 3 -#define CHCR_TS_HIGH_MASK 0 -#define CHCR_TS_HIGH_SHIFT 0 #else /* SH7785 */ #define DMTE0_IRQ 33 #define DMTE4_IRQ 37 @@ -97,10 +73,6 @@ #define SH_DMAC_BASE0 0xFC808020 #define SH_DMAC_BASE1 0xFCC08020 #define SH_DMARS_BASE0 0xFC809000 -#define CHCR_TS_LOW_MASK 0x00000018 -#define CHCR_TS_LOW_SHIFT 3 -#define CHCR_TS_HIGH_MASK 0 -#define CHCR_TS_HIGH_SHIFT 0 #endif #define REQ_HE 0x000000C0 @@ -108,38 +80,4 @@ #define REQ_LE 0x00000040 #define TM_BURST 0x00000020 -/* - * The SuperH DMAC supports a number of transmit sizes, we list them here, - * with their respective values as they appear in the CHCR registers. - * - * Defaults to a 64-bit transfer size. - */ -enum { - XMIT_SZ_8BIT = 0, - XMIT_SZ_16BIT = 1, - XMIT_SZ_32BIT = 2, - XMIT_SZ_64BIT = 7, - XMIT_SZ_128BIT = 3, - XMIT_SZ_256BIT = 4, - XMIT_SZ_128BIT_BLK = 0xb, - XMIT_SZ_256BIT_BLK = 0xc, -}; - -/* - * The DMA count is defined as the number of bytes to transfer. 
- */ -#define TS_SHIFT { \ - [XMIT_SZ_8BIT] = 0, \ - [XMIT_SZ_16BIT] = 1, \ - [XMIT_SZ_32BIT] = 2, \ - [XMIT_SZ_64BIT] = 3, \ - [XMIT_SZ_128BIT] = 4, \ - [XMIT_SZ_256BIT] = 5, \ - [XMIT_SZ_128BIT_BLK] = 4, \ - [XMIT_SZ_256BIT_BLK] = 5, \ -} - -#define TS_INDEX2VAL(i) ((((i) & 3) << CHCR_TS_LOW_SHIFT) | \ - ((((i) >> 2) & 3) << CHCR_TS_HIGH_SHIFT)) - #endif /* __ASM_SH_CPU_SH4_DMA_SH7780_H */ diff --git a/arch/sh/include/cpu-sh4/cpu/dma.h b/arch/sh/include/cpu-sh4/cpu/dma.h index 114a369705b..ca747e93c2e 100644 --- a/arch/sh/include/cpu-sh4/cpu/dma.h +++ b/arch/sh/include/cpu-sh4/cpu/dma.h @@ -5,9 +5,8 @@ #ifdef CONFIG_CPU_SH4A -#define DMAOR_INIT (DMAOR_DME) - #include + #else /* CONFIG_CPU_SH4A */ /* * SH7750/SH7751/SH7760 @@ -17,7 +16,6 @@ #define DMTE6_IRQ 46 #define DMAE0_IRQ 38 -#define DMAOR_INIT (0x8000|DMAOR_DME) #define SH_DMAC_BASE0 0xffa00000 #define SH_DMAC_BASE1 0xffa00070 /* Definitions for the SuperH DMAC */ @@ -27,40 +25,8 @@ #define TS_32 0x00000030 #define TS_64 0x00000000 -#define CHCR_TS_LOW_MASK 0x70 -#define CHCR_TS_LOW_SHIFT 4 -#define CHCR_TS_HIGH_MASK 0 -#define CHCR_TS_HIGH_SHIFT 0 - #define DMAOR_COD 0x00000008 -/* - * The SuperH DMAC supports a number of transmit sizes, we list them here, - * with their respective values as they appear in the CHCR registers. - * - * Defaults to a 64-bit transfer size. - */ -enum { - XMIT_SZ_8BIT = 1, - XMIT_SZ_16BIT = 2, - XMIT_SZ_32BIT = 3, - XMIT_SZ_64BIT = 0, - XMIT_SZ_256BIT = 4, -}; - -/* - * The DMA count is defined as the number of bytes to transfer. - */ -#define TS_SHIFT { \ - [XMIT_SZ_8BIT] = 0, \ - [XMIT_SZ_16BIT] = 1, \ - [XMIT_SZ_32BIT] = 2, \ - [XMIT_SZ_64BIT] = 3, \ - [XMIT_SZ_256BIT] = 5, \ -} - -#define TS_INDEX2VAL(i) (((i) & 7) << CHCR_TS_LOW_SHIFT) - #endif #endif /* __ASM_CPU_SH4_DMA_H */ diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c index aec182bed8a..89f84911c35 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c @@ -7,18 +7,21 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. 
*/ -#include #include +#include +#include #include #include -#include +#include #include #include -#include + #include +#include #include -#include #include + +#include #include static struct sh_dmae_slave_config sh7722_dmae_slaves[] = { @@ -103,11 +106,20 @@ static struct sh_dmae_channel sh7722_dmae_channels[] = { } }; +static unsigned int ts_shift[] = TS_SHIFT; + static struct sh_dmae_pdata dma_platform_data = { .slave = sh7722_dmae_slaves, .slave_num = ARRAY_SIZE(sh7722_dmae_slaves), .channel = sh7722_dmae_channels, .channel_num = ARRAY_SIZE(sh7722_dmae_channels), + .ts_low_shift = CHCR_TS_LOW_SHIFT, + .ts_low_mask = CHCR_TS_LOW_MASK, + .ts_high_shift = CHCR_TS_HIGH_SHIFT, + .ts_high_mask = CHCR_TS_HIGH_MASK, + .ts_shift = ts_shift, + .ts_shift_num = ARRAY_SIZE(ts_shift), + .dmaor_init = DMAOR_INIT, }; static struct resource sh7722_dmae_resources[] = { diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c index aca1fb2c571..d9cb2c471a3 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c @@ -21,10 +21,13 @@ #include #include #include + #include #include -#include +#include #include + +#include #include /* DMA */ @@ -84,14 +87,30 @@ static struct sh_dmae_channel sh7724_dmae1_channels[] = { } }; +static unsigned int ts_shift[] = TS_SHIFT; + static struct sh_dmae_pdata dma0_platform_data = { .channel = sh7724_dmae0_channels, .channel_num = ARRAY_SIZE(sh7724_dmae0_channels), + .ts_low_shift = CHCR_TS_LOW_SHIFT, + .ts_low_mask = CHCR_TS_LOW_MASK, + .ts_high_shift = CHCR_TS_HIGH_SHIFT, + .ts_high_mask = CHCR_TS_HIGH_MASK, + .ts_shift = ts_shift, + .ts_shift_num = ARRAY_SIZE(ts_shift), + .dmaor_init = DMAOR_INIT, }; static struct sh_dmae_pdata dma1_platform_data = { .channel = sh7724_dmae1_channels, .channel_num = ARRAY_SIZE(sh7724_dmae1_channels), + .ts_low_shift = CHCR_TS_LOW_SHIFT, + .ts_low_mask = CHCR_TS_LOW_MASK, + .ts_high_shift = CHCR_TS_HIGH_SHIFT, + .ts_high_mask = CHCR_TS_HIGH_MASK, + .ts_shift = ts_shift, + .ts_shift_num = ARRAY_SIZE(ts_shift), + .dmaor_init = DMAOR_INIT, }; /* Resource order important! 
*/ diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c index 338dfc2c2bb..02e792c90de 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c @@ -13,7 +13,10 @@ #include #include #include -#include + +#include + +#include static struct plat_sci_port scif0_platform_data = { .mapbase = 0xffe00000, @@ -292,14 +295,30 @@ static struct sh_dmae_channel sh7780_dmae1_channels[] = { } }; +static unsigned int ts_shift[] = TS_SHIFT; + static struct sh_dmae_pdata dma0_platform_data = { .channel = sh7780_dmae0_channels, .channel_num = ARRAY_SIZE(sh7780_dmae0_channels), + .ts_low_shift = CHCR_TS_LOW_SHIFT, + .ts_low_mask = CHCR_TS_LOW_MASK, + .ts_high_shift = CHCR_TS_HIGH_SHIFT, + .ts_high_mask = CHCR_TS_HIGH_MASK, + .ts_shift = ts_shift, + .ts_shift_num = ARRAY_SIZE(ts_shift), + .dmaor_init = DMAOR_INIT, }; static struct sh_dmae_pdata dma1_platform_data = { .channel = sh7780_dmae1_channels, .channel_num = ARRAY_SIZE(sh7780_dmae1_channels), + .ts_low_shift = CHCR_TS_LOW_SHIFT, + .ts_low_mask = CHCR_TS_LOW_MASK, + .ts_high_shift = CHCR_TS_HIGH_SHIFT, + .ts_high_mask = CHCR_TS_HIGH_MASK, + .ts_shift = ts_shift, + .ts_shift_num = ARRAY_SIZE(ts_shift), + .dmaor_init = DMAOR_INIT, }; static struct resource sh7780_dmae0_resources[] = { diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c index fbb5d1f51f1..1fcd88b1671 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c @@ -14,9 +14,12 @@ #include #include #include -#include + +#include #include +#include + static struct plat_sci_port scif0_platform_data = { .mapbase = 0xffea0000, .flags = UPF_BOOT_AUTOCONF, @@ -340,14 +343,30 @@ static struct sh_dmae_channel sh7785_dmae1_channels[] = { } }; +static unsigned int ts_shift[] = TS_SHIFT; + static struct sh_dmae_pdata dma0_platform_data = { .channel = sh7785_dmae0_channels, .channel_num = ARRAY_SIZE(sh7785_dmae0_channels), + .ts_low_shift = CHCR_TS_LOW_SHIFT, + .ts_low_mask = CHCR_TS_LOW_MASK, + .ts_high_shift = CHCR_TS_HIGH_SHIFT, + .ts_high_mask = CHCR_TS_HIGH_MASK, + .ts_shift = ts_shift, + .ts_shift_num = ARRAY_SIZE(ts_shift), + .dmaor_init = DMAOR_INIT, }; static struct sh_dmae_pdata dma1_platform_data = { .channel = sh7785_dmae1_channels, .channel_num = ARRAY_SIZE(sh7785_dmae1_channels), + .ts_low_shift = CHCR_TS_LOW_SHIFT, + .ts_low_mask = CHCR_TS_LOW_MASK, + .ts_high_shift = CHCR_TS_HIGH_SHIFT, + .ts_high_mask = CHCR_TS_HIGH_MASK, + .ts_shift = ts_shift, + .ts_shift_num = ARRAY_SIZE(ts_shift), + .dmaor_init = DMAOR_INIT, }; static struct resource sh7785_dmae0_resources[] = { diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index ab12fa5a129..b419afaa238 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c @@ -24,8 +24,7 @@ #include #include #include -#include -#include +#include #include "shdma.h" /* DMA descriptor control */ @@ -38,15 +37,8 @@ enum sh_dmae_desc_status { }; #define NR_DESCS_PER_CHANNEL 32 -/* - * Define the default configuration for dual address memory-memory transfer. - * The 0x400 value represents auto-request, external->external. - * - * And this driver set 4byte burst mode. - * If you want to change mode, you need to change RS_DEFAULT of value. 
- * (ex 1byte burst mode -> (RS_DUAL & ~TS_32) - */ -#define RS_DEFAULT (RS_DUAL) +/* Default MEMCPY transfer size = 2^2 = 4 bytes */ +#define LOG2_DEFAULT_XFER_SIZE 2 /* A bitmask with bits enough for enum sh_dmae_slave_chan_id */ static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)]; @@ -90,7 +82,7 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev) unsigned short dmaor; sh_dmae_ctl_stop(shdev); - dmaor = dmaor_read(shdev) | DMAOR_INIT; + dmaor = dmaor_read(shdev) | shdev->pdata->dmaor_init; dmaor_write(shdev, dmaor); if (dmaor_read(shdev) & (DMAOR_AE | DMAOR_NMIF)) { @@ -110,13 +102,36 @@ static bool dmae_is_busy(struct sh_dmae_chan *sh_chan) return false; /* waiting */ } -static unsigned int ts_shift[] = TS_SHIFT; -static inline unsigned int calc_xmit_shift(u32 chcr) +static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr) { - int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) | - ((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT); + struct sh_dmae_device *shdev = container_of(sh_chan->common.device, + struct sh_dmae_device, common); + struct sh_dmae_pdata *pdata = shdev->pdata; + int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) | + ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift); + + if (cnt >= pdata->ts_shift_num) + cnt = 0; - return ts_shift[cnt]; + return pdata->ts_shift[cnt]; +} + +static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size) +{ + struct sh_dmae_device *shdev = container_of(sh_chan->common.device, + struct sh_dmae_device, common); + struct sh_dmae_pdata *pdata = shdev->pdata; + int i; + + for (i = 0; i < pdata->ts_shift_num; i++) + if (pdata->ts_shift[i] == l2size) + break; + + if (i == pdata->ts_shift_num) + i = 0; + + return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) | + ((i << pdata->ts_high_shift) & pdata->ts_high_mask); } static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) @@ -144,8 +159,13 @@ static void dmae_halt(struct sh_dmae_chan *sh_chan) static void dmae_init(struct sh_dmae_chan *sh_chan) { - u32 chcr = RS_DEFAULT; /* default is DUAL mode */ - sh_chan->xmit_shift = calc_xmit_shift(chcr); + /* + * Default configuration for dual address memory-memory transfer. + * 0x400 represents auto-request. 
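+	 * DM_INC | SM_INC selects auto-incrementing source and destination
+	 * addresses, and log2size_to_chcr() translates the default log2
+	 * transfer size back into the CPU-specific TS bit layout that the
+	 * platform described via ts_low_mask/ts_high_mask.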
+ */ + u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan, + LOG2_DEFAULT_XFER_SIZE); + sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr); sh_dmae_writel(sh_chan, chcr, CHCR); } @@ -155,7 +175,7 @@ static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) if (dmae_is_busy(sh_chan)) return -EBUSY; - sh_chan->xmit_shift = calc_xmit_shift(val); + sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val); sh_dmae_writel(sh_chan, val, CHCR); return 0; @@ -285,9 +305,8 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) dmae_set_dmars(sh_chan, cfg->mid_rid); dmae_set_chcr(sh_chan, cfg->chcr); - } else { - if ((sh_dmae_readl(sh_chan, CHCR) & 0x700) != 0x400) - dmae_set_chcr(sh_chan, RS_DEFAULT); + } else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) { + dmae_init(sh_chan); } spin_lock_bh(&sh_chan->desc_lock); @@ -757,7 +776,7 @@ static irqreturn_t sh_dmae_err(int irq, void *data) sh_dmae_ctl_stop(shdev); /* We cannot detect, which channel caused the error, have to reset all */ - for (i = 0; i < MAX_DMA_CHANNELS; i++) { + for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) { struct sh_dmae_chan *sh_chan = shdev->chan[i]; if (sh_chan) { struct sh_desc *desc; @@ -822,6 +841,9 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, return -ENOMEM; } + /* copy struct dma_device */ + new_sh_chan->common.device = &shdev->common; + new_sh_chan->dev = shdev->common.dev; new_sh_chan->id = id; new_sh_chan->irq = irq; @@ -840,9 +862,6 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, INIT_LIST_HEAD(&new_sh_chan->ld_queue); INIT_LIST_HEAD(&new_sh_chan->ld_free); - /* copy struct dma_device */ - new_sh_chan->common.device = &shdev->common; - /* Add the channel to DMA device channel list */ list_add_tail(&new_sh_chan->common.device_node, &shdev->common.channels); @@ -896,8 +915,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev) { struct sh_dmae_pdata *pdata = pdev->dev.platform_data; unsigned long irqflags = IRQF_DISABLED, - chan_flag[MAX_DMA_CHANNELS] = {}; - int errirq, chan_irq[MAX_DMA_CHANNELS]; + chan_flag[SH_DMAC_MAX_CHANNELS] = {}; + int errirq, chan_irq[SH_DMAC_MAX_CHANNELS]; int err, i, irq_cnt = 0, irqres = 0; struct sh_dmae_device *shdev; struct resource *chan, *dmars, *errirq_res, *chanirq_res; @@ -983,7 +1002,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev) shdev->common.dev = &pdev->dev; /* Default transfer size of 32 bytes requires 32-byte alignment */ - shdev->common.copy_align = 5; + shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE; #if defined(CONFIG_CPU_SH4) chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1); diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h index 800fd884be8..9f0897f7fe3 100644 --- a/drivers/dma/shdma.h +++ b/drivers/dma/shdma.h @@ -17,6 +17,8 @@ #include #include +#include + #define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */ struct sh_dmae_regs { @@ -55,7 +57,7 @@ struct sh_dmae_chan { struct sh_dmae_device { struct dma_device common; - struct sh_dmae_chan *chan[MAX_DMA_CHANNELS]; + struct sh_dmae_chan *chan[SH_DMAC_MAX_CHANNELS]; struct sh_dmae_pdata *pdata; u32 __iomem *chan_reg; u16 __iomem *dmars; diff --git a/sound/soc/sh/siu.h b/sound/soc/sh/siu.h index 9cc04ab2bce..c0bfab8fed3 100644 --- a/sound/soc/sh/siu.h +++ b/sound/soc/sh/siu.h @@ -72,7 +72,7 @@ struct siu_firmware { #include #include -#include +#include #include #include diff --git a/sound/soc/sh/siu_pcm.c b/sound/soc/sh/siu_pcm.c index c5efc30f013..ba7f8d05d97 100644 --- 
a/sound/soc/sh/siu_pcm.c +++ b/sound/soc/sh/siu_pcm.c @@ -32,7 +32,7 @@ #include #include -#include +#include #include #include "siu.h" -- cgit v1.2.3-70-g09d2 From 20f2a3b5d57701c54bdd59b89dd37fe775926bae Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Thu, 11 Feb 2010 16:50:18 +0000 Subject: dmaengine: shdma: add runtime PM support. Provided platforms implement runtime PM, this disables the controller, when not in use. Signed-off-by: Guennadi Liakhovetski Signed-off-by: Paul Mundt --- drivers/dma/shdma.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'drivers/dma') diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index b419afaa238..ea6779f3e73 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c @@ -24,7 +24,10 @@ #include #include #include +#include + #include + #include "shdma.h" /* DMA descriptor control */ @@ -287,6 +290,8 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) struct sh_desc *desc; struct sh_dmae_slave *param = chan->private; + pm_runtime_get_sync(sh_chan->dev); + /* * This relies on the guarantee from dmaengine that alloc_chan_resources * never runs concurrently with itself or free_chan_resources. @@ -328,6 +333,9 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) } spin_unlock_bh(&sh_chan->desc_lock); + if (!sh_chan->descs_allocated) + pm_runtime_put(sh_chan->dev); + return sh_chan->descs_allocated; } @@ -339,6 +347,7 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan) struct sh_dmae_chan *sh_chan = to_sh_chan(chan); struct sh_desc *desc, *_desc; LIST_HEAD(list); + int descs = sh_chan->descs_allocated; dmae_halt(sh_chan); @@ -359,6 +368,9 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan) spin_unlock_bh(&sh_chan->desc_lock); + if (descs > 0) + pm_runtime_put(sh_chan->dev); + list_for_each_entry_safe(desc, _desc, &list, node) kfree(desc); } @@ -978,6 +990,9 @@ static int __init sh_dmae_probe(struct platform_device *pdev) /* platform data */ shdev->pdata = pdata; + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); + /* reset dma controller */ err = sh_dmae_rst(shdev); if (err) @@ -1066,6 +1081,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev) goto chan_probe_err; } + pm_runtime_put(&pdev->dev); + platform_set_drvdata(pdev, shdev); dma_async_device_register(&shdev->common); @@ -1079,6 +1096,7 @@ eirqres: eirq_err: #endif rst_err: + pm_runtime_put(&pdev->dev); if (dmars) iounmap(shdev->dmars); emapdmars: @@ -1108,6 +1126,8 @@ static int __exit sh_dmae_remove(struct platform_device *pdev) /* channel data remove */ sh_dmae_chan_remove(shdev); + pm_runtime_disable(&pdev->dev); + if (shdev->dmars) iounmap(shdev->dmars); iounmap(shdev->chan_reg); -- cgit v1.2.3-70-g09d2 From c014906a870ce70e009def0c9d170ccabeb0be63 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Thu, 18 Feb 2010 16:30:02 +0000 Subject: dmaengine: shdma: extend .device_terminate_all() to record partial transfer This patch extends the .device_terminate_all() method of the shdma driver to return number of bytes transfered in the current descriptor. 
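TCR counts transfer units rather than bytes, so the recorded value is the difference between the programmed count and what the controller still reports as outstanding, scaled by the channel's transfer-size shift; the core of the change, as it appears in the diff below, is:

	desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
		sh_chan->xmit_shift;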
Signed-off-by: Guennadi Liakhovetski Signed-off-by: Paul Mundt --- arch/sh/include/asm/dmaengine.h | 20 ++++++++++++++++++++ drivers/dma/shdma.c | 16 ++++++++++++++++ drivers/dma/shdma.h | 16 ---------------- 3 files changed, 36 insertions(+), 16 deletions(-) (limited to 'drivers/dma') diff --git a/arch/sh/include/asm/dmaengine.h b/arch/sh/include/asm/dmaengine.h index 9586e4a482b..bf2f30cf0a2 100644 --- a/arch/sh/include/asm/dmaengine.h +++ b/arch/sh/include/asm/dmaengine.h @@ -10,6 +10,9 @@ #ifndef ASM_DMAENGINE_H #define ASM_DMAENGINE_H +#include +#include + #include #define SH_DMAC_MAX_CHANNELS 6 @@ -70,4 +73,21 @@ struct sh_dmae_slave { struct sh_dmae_slave_config *config; /* Set by the driver */ }; +struct sh_dmae_regs { + u32 sar; /* SAR / source address */ + u32 dar; /* DAR / destination address */ + u32 tcr; /* TCR / transfer count */ +}; + +struct sh_desc { + struct sh_dmae_regs hw; + struct list_head node; + struct dma_async_tx_descriptor async_tx; + enum dma_data_direction direction; + dma_cookie_t cookie; + size_t partial; + int chunks; + int mark; +}; + #endif diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index ea6779f3e73..5d17e09cb62 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c @@ -587,6 +587,19 @@ static void sh_dmae_terminate_all(struct dma_chan *chan) if (!chan) return; + dmae_halt(sh_chan); + + spin_lock_bh(&sh_chan->desc_lock); + if (!list_empty(&sh_chan->ld_queue)) { + /* Record partial transfer */ + struct sh_desc *desc = list_entry(sh_chan->ld_queue.next, + struct sh_desc, node); + desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) << + sh_chan->xmit_shift; + + } + spin_unlock_bh(&sh_chan->desc_lock); + sh_dmae_chan_ld_cleanup(sh_chan, true); } @@ -701,6 +714,9 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) /* Find the first not transferred desciptor */ list_for_each_entry(desc, &sh_chan->ld_queue, node) if (desc->mark == DESC_SUBMITTED) { + dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n", + desc->async_tx.cookie, sh_chan->id, + desc->hw.tcr, desc->hw.sar, desc->hw.dar); /* Get the ld start address from ld_queue */ dmae_set_reg(sh_chan, &desc->hw); dmae_start(sh_chan); diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h index 9f0897f7fe3..153609a1e96 100644 --- a/drivers/dma/shdma.h +++ b/drivers/dma/shdma.h @@ -21,22 +21,6 @@ #define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */ -struct sh_dmae_regs { - u32 sar; /* SAR / source address */ - u32 dar; /* DAR / destination address */ - u32 tcr; /* TCR / transfer count */ -}; - -struct sh_desc { - struct sh_dmae_regs hw; - struct list_head node; - struct dma_async_tx_descriptor async_tx; - enum dma_data_direction direction; - dma_cookie_t cookie; - int chunks; - int mark; -}; - struct device; struct sh_dmae_chan { -- cgit v1.2.3-70-g09d2 From 6c664a8915f5341c2e7f1df0bb4b9b4a88f6ad77 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Tue, 9 Feb 2010 22:34:54 +0100 Subject: Debugging options for the DMA engine subsystem This adds Kconfig options for DEBUG and VERBOSE_DEBUG to the DMA engine subsystem, I got tired of editing the Makefile manually each time I want to debug things in here, modelled this on the debug switches for other subsystems and works like a charm when working on our DMA engines. 
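Selecting the new options simply adds -DDEBUG (and -DVERBOSE_DEBUG) to the subsystem's compile flags, which compiles the dev_dbg() calls in the core and drivers into real log output instead of no-ops; for example, the per-channel probe message added earlier in this series becomes visible:

	dev_dbg(&pdev->dev, "Found IRQ %d for channel %d\n", i, irq_cnt);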
Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/Kconfig | 16 ++++++++++++++++ drivers/dma/Makefile | 7 +++++++ 2 files changed, 23 insertions(+) (limited to 'drivers/dma') diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index e02d74b1e89..1f4bbd66258 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -13,6 +13,22 @@ menuconfig DMADEVICES DMA Device drivers supported by the configured arch, it may be empty in some cases. +config DMADEVICES_DEBUG + bool "DMA Engine debugging" + depends on DMADEVICES != n + help + This is an option for use by developers; most people should + say N here. This enables DMA engine core and driver debugging. + +config DMADEVICES_VDEBUG + bool "DMA Engine verbose debugging" + depends on DMADEVICES_DEBUG != n + help + This is an option for use by developers; most people should + say N here. This enables deeper (more verbose) debugging of + the DMA engine core and drivers. + + if DMADEVICES comment "DMA Devices" diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 807053d4823..54112d67ab2 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -1,3 +1,10 @@ +ifeq ($(CONFIG_DMADEVICES_DEBUG),y) + EXTRA_CFLAGS += -DDEBUG +endif +ifeq ($(CONFIG_DMADEVICES_VDEBUG),y) + EXTRA_CFLAGS += -DVERBOSE_DEBUG +endif + obj-$(CONFIG_DMA_ENGINE) += dmaengine.o obj-$(CONFIG_NET_DMA) += iovlock.o obj-$(CONFIG_DMATEST) += dmatest.o -- cgit v1.2.3-70-g09d2 From 0fb6f739bb612bc989d295056877374b749e721b Mon Sep 17 00:00:00 2001 From: Piotr Ziecik Date: Fri, 5 Feb 2010 03:42:52 +0000 Subject: dma: Add MPC512x DMA driver Adds initial version of MPC512x DMA driver. Only memory to memory transfers are currenly supported. Signed-off-by: Piotr Ziecik Signed-off-by: Wolfgang Denk Signed-off-by: Anatolij Gustschin Cc: John Rigby Acked-by: Grant Likely Signed-off-by: Dan Williams --- drivers/dma/Kconfig | 7 + drivers/dma/Makefile | 1 + drivers/dma/mpc512x_dma.c | 800 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 808 insertions(+) create mode 100644 drivers/dma/mpc512x_dma.c (limited to 'drivers/dma') diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 1f4bbd66258..c27f80e5d53 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -85,6 +85,13 @@ config FSL_DMA The Elo is the DMA controller on some 82xx and 83xx parts, and the Elo Plus is the DMA controller on 85xx and 86xx parts. +config MPC512X_DMA + tristate "Freescale MPC512x built-in DMA engine support" + depends on PPC_MPC512x + select DMA_ENGINE + ---help--- + Enable support for the Freescale MPC512x built-in DMA engine. + config MV_XOR bool "Marvell XOR engine support" depends on PLAT_ORION diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 54112d67ab2..22bba3d5e2b 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_DMATEST) += dmatest.o obj-$(CONFIG_INTEL_IOATDMA) += ioat/ obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o obj-$(CONFIG_FSL_DMA) += fsldma.o +obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o obj-$(CONFIG_MV_XOR) += mv_xor.o obj-$(CONFIG_DW_DMAC) += dw_dmac.o obj-$(CONFIG_AT_HDMAC) += at_hdmac.o diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c new file mode 100644 index 00000000000..3fdf1f46bd6 --- /dev/null +++ b/drivers/dma/mpc512x_dma.c @@ -0,0 +1,800 @@ +/* + * Copyright (C) Freescale Semicondutor, Inc. 2007, 2008. + * Copyright (C) Semihalf 2009 + * + * Written by Piotr Ziecik . 
Hardware description + * (defines, structures and comments) was taken from MPC5121 DMA driver + * written by Hongjun Chen . + * + * Approved as OSADL project by a majority of OSADL members and funded + * by OSADL membership fees in 2009; for details see www.osadl.org. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution in the + * file called COPYING. + */ + +/* + * This is initial version of MPC5121 DMA driver. Only memory to memory + * transfers are supported (tested using dmatest module). + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +/* Number of DMA Transfer descriptors allocated per channel */ +#define MPC_DMA_DESCRIPTORS 64 + +/* Macro definitions */ +#define MPC_DMA_CHANNELS 64 +#define MPC_DMA_TCD_OFFSET 0x1000 + +/* Arbitration mode of group and channel */ +#define MPC_DMA_DMACR_EDCG (1 << 31) +#define MPC_DMA_DMACR_ERGA (1 << 3) +#define MPC_DMA_DMACR_ERCA (1 << 2) + +/* Error codes */ +#define MPC_DMA_DMAES_VLD (1 << 31) +#define MPC_DMA_DMAES_GPE (1 << 15) +#define MPC_DMA_DMAES_CPE (1 << 14) +#define MPC_DMA_DMAES_ERRCHN(err) \ + (((err) >> 8) & 0x3f) +#define MPC_DMA_DMAES_SAE (1 << 7) +#define MPC_DMA_DMAES_SOE (1 << 6) +#define MPC_DMA_DMAES_DAE (1 << 5) +#define MPC_DMA_DMAES_DOE (1 << 4) +#define MPC_DMA_DMAES_NCE (1 << 3) +#define MPC_DMA_DMAES_SGE (1 << 2) +#define MPC_DMA_DMAES_SBE (1 << 1) +#define MPC_DMA_DMAES_DBE (1 << 0) + +#define MPC_DMA_TSIZE_1 0x00 +#define MPC_DMA_TSIZE_2 0x01 +#define MPC_DMA_TSIZE_4 0x02 +#define MPC_DMA_TSIZE_16 0x04 +#define MPC_DMA_TSIZE_32 0x05 + +/* MPC5121 DMA engine registers */ +struct __attribute__ ((__packed__)) mpc_dma_regs { + /* 0x00 */ + u32 dmacr; /* DMA control register */ + u32 dmaes; /* DMA error status */ + /* 0x08 */ + u32 dmaerqh; /* DMA enable request high(channels 63~32) */ + u32 dmaerql; /* DMA enable request low(channels 31~0) */ + u32 dmaeeih; /* DMA enable error interrupt high(ch63~32) */ + u32 dmaeeil; /* DMA enable error interrupt low(ch31~0) */ + /* 0x18 */ + u8 dmaserq; /* DMA set enable request */ + u8 dmacerq; /* DMA clear enable request */ + u8 dmaseei; /* DMA set enable error interrupt */ + u8 dmaceei; /* DMA clear enable error interrupt */ + /* 0x1c */ + u8 dmacint; /* DMA clear interrupt request */ + u8 dmacerr; /* DMA clear error */ + u8 dmassrt; /* DMA set start bit */ + u8 dmacdne; /* DMA clear DONE status bit */ + /* 0x20 */ + u32 dmainth; /* DMA interrupt request high(ch63~32) */ + u32 dmaintl; /* DMA interrupt request low(ch31~0) */ + u32 dmaerrh; /* DMA error high(ch63~32) */ + u32 dmaerrl; /* DMA error low(ch31~0) */ + /* 0x30 */ + u32 dmahrsh; /* DMA hw request status high(ch63~32) */ + u32 dmahrsl; /* DMA hardware request status low(ch31~0) */ + u32 dmaihsa; /* DMA interrupt high select AXE(ch63~32) */ + u32 
dmailsa; /* DMA interrupt low select AXE(ch31~0) */ + /* 0x40 ~ 0xff */ + u32 reserve0[48]; /* Reserved */ + /* 0x100 */ + u8 dchpri[MPC_DMA_CHANNELS]; + /* DMA channels(0~63) priority */ +}; + +struct __attribute__ ((__packed__)) mpc_dma_tcd { + /* 0x00 */ + u32 saddr; /* Source address */ + + u32 smod:5; /* Source address modulo */ + u32 ssize:3; /* Source data transfer size */ + u32 dmod:5; /* Destination address modulo */ + u32 dsize:3; /* Destination data transfer size */ + u32 soff:16; /* Signed source address offset */ + + /* 0x08 */ + u32 nbytes; /* Inner "minor" byte count */ + u32 slast; /* Last source address adjustment */ + u32 daddr; /* Destination address */ + + /* 0x14 */ + u32 citer_elink:1; /* Enable channel-to-channel linking on + * minor loop complete + */ + u32 citer_linkch:6; /* Link channel for minor loop complete */ + u32 citer:9; /* Current "major" iteration count */ + u32 doff:16; /* Signed destination address offset */ + + /* 0x18 */ + u32 dlast_sga; /* Last Destination address adjustment/scatter + * gather address + */ + + /* 0x1c */ + u32 biter_elink:1; /* Enable channel-to-channel linking on major + * loop complete + */ + u32 biter_linkch:6; + u32 biter:9; /* Beginning "major" iteration count */ + u32 bwc:2; /* Bandwidth control */ + u32 major_linkch:6; /* Link channel number */ + u32 done:1; /* Channel done */ + u32 active:1; /* Channel active */ + u32 major_elink:1; /* Enable channel-to-channel linking on major + * loop complete + */ + u32 e_sg:1; /* Enable scatter/gather processing */ + u32 d_req:1; /* Disable request */ + u32 int_half:1; /* Enable an interrupt when major counter is + * half complete + */ + u32 int_maj:1; /* Enable an interrupt when major iteration + * count completes + */ + u32 start:1; /* Channel start */ +}; + +struct mpc_dma_desc { + struct dma_async_tx_descriptor desc; + struct mpc_dma_tcd *tcd; + dma_addr_t tcd_paddr; + int error; + struct list_head node; +}; + +struct mpc_dma_chan { + struct dma_chan chan; + struct list_head free; + struct list_head prepared; + struct list_head queued; + struct list_head active; + struct list_head completed; + struct mpc_dma_tcd *tcd; + dma_addr_t tcd_paddr; + dma_cookie_t completed_cookie; + + /* Lock for this structure */ + spinlock_t lock; +}; + +struct mpc_dma { + struct dma_device dma; + struct tasklet_struct tasklet; + struct mpc_dma_chan channels[MPC_DMA_CHANNELS]; + struct mpc_dma_regs __iomem *regs; + struct mpc_dma_tcd __iomem *tcd; + int irq; + uint error_status; + + /* Lock for error_status field in this structure */ + spinlock_t error_status_lock; +}; + +#define DRV_NAME "mpc512x_dma" + +/* Convert struct dma_chan to struct mpc_dma_chan */ +static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c) +{ + return container_of(c, struct mpc_dma_chan, chan); +} + +/* Convert struct dma_chan to struct mpc_dma */ +static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c) +{ + struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c); + return container_of(mchan, struct mpc_dma, channels[c->chan_id]); +} + +/* + * Execute all queued DMA descriptors. + * + * Following requirements must be met while calling mpc_dma_execute(): + * a) mchan->lock is acquired, + * b) mchan->active list is empty, + * c) mchan->queued list contains at least one entry. 
+ */ +static void mpc_dma_execute(struct mpc_dma_chan *mchan) +{ + struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan); + struct mpc_dma_desc *first = NULL; + struct mpc_dma_desc *prev = NULL; + struct mpc_dma_desc *mdesc; + int cid = mchan->chan.chan_id; + + /* Move all queued descriptors to active list */ + list_splice_tail_init(&mchan->queued, &mchan->active); + + /* Chain descriptors into one transaction */ + list_for_each_entry(mdesc, &mchan->active, node) { + if (!first) + first = mdesc; + + if (!prev) { + prev = mdesc; + continue; + } + + prev->tcd->dlast_sga = mdesc->tcd_paddr; + prev->tcd->e_sg = 1; + mdesc->tcd->start = 1; + + prev = mdesc; + } + + prev->tcd->start = 0; + prev->tcd->int_maj = 1; + + /* Send first descriptor in chain into hardware */ + memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd)); + out_8(&mdma->regs->dmassrt, cid); +} + +/* Handle interrupt on one half of DMA controller (32 channels) */ +static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off) +{ + struct mpc_dma_chan *mchan; + struct mpc_dma_desc *mdesc; + u32 status = is | es; + int ch; + + while ((ch = fls(status) - 1) >= 0) { + status &= ~(1 << ch); + mchan = &mdma->channels[ch + off]; + + spin_lock(&mchan->lock); + + /* Check error status */ + if (es & (1 << ch)) + list_for_each_entry(mdesc, &mchan->active, node) + mdesc->error = -EIO; + + /* Execute queued descriptors */ + list_splice_tail_init(&mchan->active, &mchan->completed); + if (!list_empty(&mchan->queued)) + mpc_dma_execute(mchan); + + spin_unlock(&mchan->lock); + } +} + +/* Interrupt handler */ +static irqreturn_t mpc_dma_irq(int irq, void *data) +{ + struct mpc_dma *mdma = data; + uint es; + + /* Save error status register */ + es = in_be32(&mdma->regs->dmaes); + spin_lock(&mdma->error_status_lock); + if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0) + mdma->error_status = es; + spin_unlock(&mdma->error_status_lock); + + /* Handle interrupt on each channel */ + mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth), + in_be32(&mdma->regs->dmaerrh), 32); + mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl), + in_be32(&mdma->regs->dmaerrl), 0); + + /* Ack interrupt on all channels */ + out_be32(&mdma->regs->dmainth, 0xFFFFFFFF); + out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF); + out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF); + out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF); + + /* Schedule tasklet */ + tasklet_schedule(&mdma->tasklet); + + return IRQ_HANDLED; +} + +/* DMA Tasklet */ +static void mpc_dma_tasklet(unsigned long data) +{ + struct mpc_dma *mdma = (void *)data; + dma_cookie_t last_cookie = 0; + struct mpc_dma_chan *mchan; + struct mpc_dma_desc *mdesc; + struct dma_async_tx_descriptor *desc; + unsigned long flags; + LIST_HEAD(list); + uint es; + int i; + + spin_lock_irqsave(&mdma->error_status_lock, flags); + es = mdma->error_status; + mdma->error_status = 0; + spin_unlock_irqrestore(&mdma->error_status_lock, flags); + + /* Print nice error report */ + if (es) { + dev_err(mdma->dma.dev, + "Hardware reported following error(s) on channel %u:\n", + MPC_DMA_DMAES_ERRCHN(es)); + + if (es & MPC_DMA_DMAES_GPE) + dev_err(mdma->dma.dev, "- Group Priority Error\n"); + if (es & MPC_DMA_DMAES_CPE) + dev_err(mdma->dma.dev, "- Channel Priority Error\n"); + if (es & MPC_DMA_DMAES_SAE) + dev_err(mdma->dma.dev, "- Source Address Error\n"); + if (es & MPC_DMA_DMAES_SOE) + dev_err(mdma->dma.dev, "- Source Offset" + " Configuration Error\n"); + if (es & MPC_DMA_DMAES_DAE) + dev_err(mdma->dma.dev, "- 
Destination Address" + " Error\n"); + if (es & MPC_DMA_DMAES_DOE) + dev_err(mdma->dma.dev, "- Destination Offset" + " Configuration Error\n"); + if (es & MPC_DMA_DMAES_NCE) + dev_err(mdma->dma.dev, "- NBytes/Citter" + " Configuration Error\n"); + if (es & MPC_DMA_DMAES_SGE) + dev_err(mdma->dma.dev, "- Scatter/Gather" + " Configuration Error\n"); + if (es & MPC_DMA_DMAES_SBE) + dev_err(mdma->dma.dev, "- Source Bus Error\n"); + if (es & MPC_DMA_DMAES_DBE) + dev_err(mdma->dma.dev, "- Destination Bus Error\n"); + } + + for (i = 0; i < mdma->dma.chancnt; i++) { + mchan = &mdma->channels[i]; + + /* Get all completed descriptors */ + spin_lock_irqsave(&mchan->lock, flags); + if (!list_empty(&mchan->completed)) + list_splice_tail_init(&mchan->completed, &list); + spin_unlock_irqrestore(&mchan->lock, flags); + + if (list_empty(&list)) + continue; + + /* Execute callbacks and run dependencies */ + list_for_each_entry(mdesc, &list, node) { + desc = &mdesc->desc; + + if (desc->callback) + desc->callback(desc->callback_param); + + last_cookie = desc->cookie; + dma_run_dependencies(desc); + } + + /* Free descriptors */ + spin_lock_irqsave(&mchan->lock, flags); + list_splice_tail_init(&list, &mchan->free); + mchan->completed_cookie = last_cookie; + spin_unlock_irqrestore(&mchan->lock, flags); + } +} + +/* Submit descriptor to hardware */ +static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd) +{ + struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan); + struct mpc_dma_desc *mdesc; + unsigned long flags; + dma_cookie_t cookie; + + mdesc = container_of(txd, struct mpc_dma_desc, desc); + + spin_lock_irqsave(&mchan->lock, flags); + + /* Move descriptor to queue */ + list_move_tail(&mdesc->node, &mchan->queued); + + /* If channel is idle, execute all queued descriptors */ + if (list_empty(&mchan->active)) + mpc_dma_execute(mchan); + + /* Update cookie */ + cookie = mchan->chan.cookie + 1; + if (cookie <= 0) + cookie = 1; + + mchan->chan.cookie = cookie; + mdesc->desc.cookie = cookie; + + spin_unlock_irqrestore(&mchan->lock, flags); + + return cookie; +} + +/* Alloc channel resources */ +static int mpc_dma_alloc_chan_resources(struct dma_chan *chan) +{ + struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan); + struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); + struct mpc_dma_desc *mdesc; + struct mpc_dma_tcd *tcd; + dma_addr_t tcd_paddr; + unsigned long flags; + LIST_HEAD(descs); + int i; + + /* Alloc DMA memory for Transfer Control Descriptors */ + tcd = dma_alloc_coherent(mdma->dma.dev, + MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd), + &tcd_paddr, GFP_KERNEL); + if (!tcd) + return -ENOMEM; + + /* Alloc descriptors for this channel */ + for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) { + mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL); + if (!mdesc) { + dev_notice(mdma->dma.dev, "Memory allocation error. 
" + "Allocated only %u descriptors\n", i); + break; + } + + dma_async_tx_descriptor_init(&mdesc->desc, chan); + mdesc->desc.flags = DMA_CTRL_ACK; + mdesc->desc.tx_submit = mpc_dma_tx_submit; + + mdesc->tcd = &tcd[i]; + mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd)); + + list_add_tail(&mdesc->node, &descs); + } + + /* Return error only if no descriptors were allocated */ + if (i == 0) { + dma_free_coherent(mdma->dma.dev, + MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd), + tcd, tcd_paddr); + return -ENOMEM; + } + + spin_lock_irqsave(&mchan->lock, flags); + mchan->tcd = tcd; + mchan->tcd_paddr = tcd_paddr; + list_splice_tail_init(&descs, &mchan->free); + spin_unlock_irqrestore(&mchan->lock, flags); + + /* Enable Error Interrupt */ + out_8(&mdma->regs->dmaseei, chan->chan_id); + + return 0; +} + +/* Free channel resources */ +static void mpc_dma_free_chan_resources(struct dma_chan *chan) +{ + struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan); + struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); + struct mpc_dma_desc *mdesc, *tmp; + struct mpc_dma_tcd *tcd; + dma_addr_t tcd_paddr; + unsigned long flags; + LIST_HEAD(descs); + + spin_lock_irqsave(&mchan->lock, flags); + + /* Channel must be idle */ + BUG_ON(!list_empty(&mchan->prepared)); + BUG_ON(!list_empty(&mchan->queued)); + BUG_ON(!list_empty(&mchan->active)); + BUG_ON(!list_empty(&mchan->completed)); + + /* Move data */ + list_splice_tail_init(&mchan->free, &descs); + tcd = mchan->tcd; + tcd_paddr = mchan->tcd_paddr; + + spin_unlock_irqrestore(&mchan->lock, flags); + + /* Free DMA memory used by descriptors */ + dma_free_coherent(mdma->dma.dev, + MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd), + tcd, tcd_paddr); + + /* Free descriptors */ + list_for_each_entry_safe(mdesc, tmp, &descs, node) + kfree(mdesc); + + /* Disable Error Interrupt */ + out_8(&mdma->regs->dmaceei, chan->chan_id); +} + +/* Send all pending descriptor to hardware */ +static void mpc_dma_issue_pending(struct dma_chan *chan) +{ + /* + * We are posting descriptors to the hardware as soon as + * they are ready, so this function does nothing. 
+ */ +} + +/* Check request completion status */ +static enum dma_status +mpc_dma_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie, + dma_cookie_t *done, dma_cookie_t *used) +{ + struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); + unsigned long flags; + dma_cookie_t last_used; + dma_cookie_t last_complete; + + spin_lock_irqsave(&mchan->lock, flags); + last_used = mchan->chan.cookie; + last_complete = mchan->completed_cookie; + spin_unlock_irqrestore(&mchan->lock, flags); + + if (done) + *done = last_complete; + + if (used) + *used = last_used; + + return dma_async_is_complete(cookie, last_complete, last_used); +} + +/* Prepare descriptor for memory to memory copy */ +static struct dma_async_tx_descriptor * +mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, + size_t len, unsigned long flags) +{ + struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); + struct mpc_dma_desc *mdesc = NULL; + struct mpc_dma_tcd *tcd; + unsigned long iflags; + + /* Get free descriptor */ + spin_lock_irqsave(&mchan->lock, iflags); + if (!list_empty(&mchan->free)) { + mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc, + node); + list_del(&mdesc->node); + } + spin_unlock_irqrestore(&mchan->lock, iflags); + + if (!mdesc) + return NULL; + + mdesc->error = 0; + tcd = mdesc->tcd; + + /* Prepare Transfer Control Descriptor for this transaction */ + memset(tcd, 0, sizeof(struct mpc_dma_tcd)); + + if (IS_ALIGNED(src | dst | len, 32)) { + tcd->ssize = MPC_DMA_TSIZE_32; + tcd->dsize = MPC_DMA_TSIZE_32; + tcd->soff = 32; + tcd->doff = 32; + } else if (IS_ALIGNED(src | dst | len, 16)) { + tcd->ssize = MPC_DMA_TSIZE_16; + tcd->dsize = MPC_DMA_TSIZE_16; + tcd->soff = 16; + tcd->doff = 16; + } else if (IS_ALIGNED(src | dst | len, 4)) { + tcd->ssize = MPC_DMA_TSIZE_4; + tcd->dsize = MPC_DMA_TSIZE_4; + tcd->soff = 4; + tcd->doff = 4; + } else if (IS_ALIGNED(src | dst | len, 2)) { + tcd->ssize = MPC_DMA_TSIZE_2; + tcd->dsize = MPC_DMA_TSIZE_2; + tcd->soff = 2; + tcd->doff = 2; + } else { + tcd->ssize = MPC_DMA_TSIZE_1; + tcd->dsize = MPC_DMA_TSIZE_1; + tcd->soff = 1; + tcd->doff = 1; + } + + tcd->saddr = src; + tcd->daddr = dst; + tcd->nbytes = len; + tcd->biter = 1; + tcd->citer = 1; + + /* Place descriptor in prepared list */ + spin_lock_irqsave(&mchan->lock, iflags); + list_add_tail(&mdesc->node, &mchan->prepared); + spin_unlock_irqrestore(&mchan->lock, iflags); + + return &mdesc->desc; +} + +static int __devinit mpc_dma_probe(struct of_device *op, + const struct of_device_id *match) +{ + struct device_node *dn = op->node; + struct device *dev = &op->dev; + struct dma_device *dma; + struct mpc_dma *mdma; + struct mpc_dma_chan *mchan; + struct resource res; + ulong regs_start, regs_size; + int retval, i; + + mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL); + if (!mdma) { + dev_err(dev, "Memory exhausted!\n"); + return -ENOMEM; + } + + mdma->irq = irq_of_parse_and_map(dn, 0); + if (mdma->irq == NO_IRQ) { + dev_err(dev, "Error mapping IRQ!\n"); + return -EINVAL; + } + + retval = of_address_to_resource(dn, 0, &res); + if (retval) { + dev_err(dev, "Error parsing memory region!\n"); + return retval; + } + + regs_start = res.start; + regs_size = res.end - res.start + 1; + + if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) { + dev_err(dev, "Error requesting memory region!\n"); + return -EBUSY; + } + + mdma->regs = devm_ioremap(dev, regs_start, regs_size); + if (!mdma->regs) { + dev_err(dev, "Error mapping memory region!\n"); + return -ENOMEM; + } 
+ + mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs) + + MPC_DMA_TCD_OFFSET); + + retval = devm_request_irq(dev, mdma->irq, &mpc_dma_irq, 0, DRV_NAME, + mdma); + if (retval) { + dev_err(dev, "Error requesting IRQ!\n"); + return -EINVAL; + } + + spin_lock_init(&mdma->error_status_lock); + + dma = &mdma->dma; + dma->dev = dev; + dma->chancnt = MPC_DMA_CHANNELS; + dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources; + dma->device_free_chan_resources = mpc_dma_free_chan_resources; + dma->device_issue_pending = mpc_dma_issue_pending; + dma->device_is_tx_complete = mpc_dma_is_tx_complete; + dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy; + + INIT_LIST_HEAD(&dma->channels); + dma_cap_set(DMA_MEMCPY, dma->cap_mask); + + for (i = 0; i < dma->chancnt; i++) { + mchan = &mdma->channels[i]; + + mchan->chan.device = dma; + mchan->chan.chan_id = i; + mchan->chan.cookie = 1; + mchan->completed_cookie = mchan->chan.cookie; + + INIT_LIST_HEAD(&mchan->free); + INIT_LIST_HEAD(&mchan->prepared); + INIT_LIST_HEAD(&mchan->queued); + INIT_LIST_HEAD(&mchan->active); + INIT_LIST_HEAD(&mchan->completed); + + spin_lock_init(&mchan->lock); + list_add_tail(&mchan->chan.device_node, &dma->channels); + } + + tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma); + + /* + * Configure DMA Engine: + * - Dynamic clock, + * - Round-robin group arbitration, + * - Round-robin channel arbitration. + */ + out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG | + MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA); + + /* Disable hardware DMA requests */ + out_be32(&mdma->regs->dmaerqh, 0); + out_be32(&mdma->regs->dmaerql, 0); + + /* Disable error interrupts */ + out_be32(&mdma->regs->dmaeeih, 0); + out_be32(&mdma->regs->dmaeeil, 0); + + /* Clear interrupts status */ + out_be32(&mdma->regs->dmainth, 0xFFFFFFFF); + out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF); + out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF); + out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF); + + /* Route interrupts to IPIC */ + out_be32(&mdma->regs->dmaihsa, 0); + out_be32(&mdma->regs->dmailsa, 0); + + /* Register DMA engine */ + dev_set_drvdata(dev, mdma); + retval = dma_async_device_register(dma); + if (retval) { + devm_free_irq(dev, mdma->irq, mdma); + irq_dispose_mapping(mdma->irq); + } + + return retval; +} + +static int __devexit mpc_dma_remove(struct of_device *op) +{ + struct device *dev = &op->dev; + struct mpc_dma *mdma = dev_get_drvdata(dev); + + dma_async_device_unregister(&mdma->dma); + devm_free_irq(dev, mdma->irq, mdma); + irq_dispose_mapping(mdma->irq); + + return 0; +} + +static struct of_device_id mpc_dma_match[] = { + { .compatible = "fsl,mpc5121-dma", }, + {}, +}; + +static struct of_platform_driver mpc_dma_driver = { + .match_table = mpc_dma_match, + .probe = mpc_dma_probe, + .remove = __devexit_p(mpc_dma_remove), + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, +}; + +static int __init mpc_dma_init(void) +{ + return of_register_platform_driver(&mpc_dma_driver); +} +module_init(mpc_dma_init); + +static void __exit mpc_dma_exit(void) +{ + of_unregister_platform_driver(&mpc_dma_driver); +} +module_exit(mpc_dma_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Piotr Ziecik "); -- cgit v1.2.3-70-g09d2 From 848ad121240f539e14a59eddd69e164aea9560b2 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Tue, 2 Mar 2010 14:17:15 -0700 Subject: DMAENGINE: COH 901 318 cleanups This cleans up some debug code that was not working in the COH 901 318 driver, adds some helpful comments and rearranges the code a bit.
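The most instructive hunk below is coh901318_list_print(): it used to chase the hardware's link addresses through phys_to_virt(), which is not a valid operation on the bus addresses handed out by dma_alloc_coherent() (on ARM, for instance, the coherent pool is remapped, so no linear-map inverse exists). The fixed version walks a CPU-side mirror pointer instead. The pattern, reduced to a sketch with hypothetical types (the real layout lives in the driver's lli headers):

#include <linux/printk.h>
#include <linux/types.h>

/* Reduced, hypothetical lli: the DMAC follows link_addr (a bus
 * address); the driver keeps virt_link_addr as its CPU-visible twin. */
struct lli {
	dma_addr_t link_addr;		/* next item, as the hardware sees it */
	struct lli *virt_link_addr;	/* next item, as the kernel sees it */
	u32 control;
};

static void lli_list_print(struct lli *head)
{
	struct lli *l;
	int i = 0;

	/* no phys_to_virt() round trip anywhere */
	for (l = head; l; l = l->virt_link_addr, i++)
		pr_debug("i %d, lli %p, ctrl 0x%x, virt link %p\n",
			 i, l, l->control, l->virt_link_addr);
}

The cost is one extra pointer per descriptor; the gain is a walk that works regardless of how the coherent memory was mapped.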
Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/coh901318.c | 42 ++++++++++++++++++++++++++++-------------- 1 file changed, 28 insertions(+), 14 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index 64a937262a4..f1bf4f74ad8 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -80,18 +80,16 @@ struct coh901318_chan { static void coh901318_list_print(struct coh901318_chan *cohc, struct coh901318_lli *lli) { - struct coh901318_lli *l; - dma_addr_t addr = virt_to_phys(lli); + struct coh901318_lli *l = lli; int i = 0; - while (addr) { - l = phys_to_virt(addr); + while (l) { dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%x" - ", dst 0x%x, link 0x%x link_virt 0x%p\n", + ", dst 0x%x, link 0x%x virt_link_addr 0x%p\n", i, l, l->control, l->src_addr, l->dst_addr, - l->link_addr, phys_to_virt(l->link_addr)); + l->link_addr, l->virt_link_addr); i++; - addr = l->link_addr; + l = l->virt_link_addr; } } @@ -125,7 +123,7 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf, goto err_kmalloc; tmp = dev_buf; - tmp += sprintf(tmp, "DMA -- enable dma channels\n"); + tmp += sprintf(tmp, "DMA -- enabled dma channels\n"); for (i = 0; i < debugfs_dma_base->platform->max_channels; i++) if (started_channels & (1 << i)) @@ -592,6 +590,10 @@ static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc) return cohd_que; } +/* + * This tasklet is called from the interrupt handler to + * handle each descriptor (DMA job) that is sent to a channel. + */ static void dma_tasklet(unsigned long data) { struct coh901318_chan *cohc = (struct coh901318_chan *) data; @@ -600,9 +602,13 @@ static void dma_tasklet(unsigned long data) dma_async_tx_callback callback; void *callback_param; + dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d" + " nbr_active_done %ld\n", __func__, + cohc->id, cohc->nbr_active_done); + spin_lock_irqsave(&cohc->lock, flags); - /* get first active entry from list */ + /* get first active descriptor entry from list */ cohd_fin = coh901318_first_active_get(cohc); BUG_ON(cohd_fin->pending_irqs == 0); @@ -636,10 +642,19 @@ static void dma_tasklet(unsigned long data) coh901318_desc_free(cohc, cohd_fin); } + /* + * If another interrupt fired while the tasklet was scheduling, + * we don't get called twice, so we have this number of active + * counter that keep track of the number of IRQs expected to + * be handled for this channel. If there happen to be more than + * one IRQ to be ack:ed, we simply schedule this tasklet again. 
+ */ if (cohc->nbr_active_done) cohc->nbr_active_done--; if (cohc->nbr_active_done) { + dev_dbg(COHC_2_DEV(cohc), "scheduling tasklet again, new IRQs " + "came in while we were scheduling this tasklet\n"); if (cohc_chan_conf(cohc)->priority_high) tasklet_hi_schedule(&cohc->tasklet); else @@ -994,6 +1009,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, len += factor; } + pr_debug("Allocate %d lli:s for this transfer\n", len); data = coh901318_lli_alloc(&cohc->base->pool, len); if (data == NULL) @@ -1092,9 +1108,8 @@ coh901318_terminate_all(struct dma_chan *chan) /* release the lli allocation*/ coh901318_lli_free(&cohc->base->pool, &cohd->data); - coh901318_desc_remove(cohd); - /* return desc to free-list */ + coh901318_desc_remove(cohd); coh901318_desc_free(cohc, cohd); } @@ -1102,9 +1117,8 @@ coh901318_terminate_all(struct dma_chan *chan) /* release the lli allocation*/ coh901318_lli_free(&cohc->base->pool, &cohd->data); - coh901318_desc_remove(cohd); - /* return desc to free-list */ + coh901318_desc_remove(cohd); coh901318_desc_free(cohc, cohd); } @@ -1259,7 +1273,7 @@ static int __init coh901318_probe(struct platform_device *pdev) if (err) goto err_register_memcpy; - dev_dbg(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n", + dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n", (u32) base->virtbase); return err; -- cgit v1.2.3-70-g09d2 From b87108a772e001af3fa79f9cfd87b190375f47a2 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Tue, 2 Mar 2010 14:17:20 -0700 Subject: DMAENGINE: COH 901 318 descriptor pool refactoring This centralizes some spread-out initialization of descriptors into one function and cleans up the error paths. Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- arch/arm/mach-u300/include/mach/coh901318.h | 2 +- drivers/dma/coh901318.c | 54 +++++++++++++---------------- 2 files changed, 26 insertions(+), 30 deletions(-) (limited to 'drivers/dma') diff --git a/arch/arm/mach-u300/include/mach/coh901318.h b/arch/arm/mach-u300/include/mach/coh901318.h index f4cfee9c7d2..b8155b4e5ff 100644 --- a/arch/arm/mach-u300/include/mach/coh901318.h +++ b/arch/arm/mach-u300/include/mach/coh901318.h @@ -53,7 +53,7 @@ struct coh901318_params { * struct coh_dma_channel - dma channel base * @name: ascii name of dma channel * @number: channel id number - * @desc_nbr_max: number of preallocated descriptortors + * @desc_nbr_max: number of preallocated descriptors * @priority_high: prio of channel, 0 low otherwise high. * @param: configuration parameters * @dev_addr: physical address of periphal connected to channel diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index f1bf4f74ad8..12a7a151be6 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -335,16 +335,22 @@ coh901318_desc_get(struct coh901318_chan *cohc) * TODO: alloc a pile of descs instead of just one, * avoid many small allocations. */ - desc = kmalloc(sizeof(struct coh901318_desc), GFP_NOWAIT); + desc = kzalloc(sizeof(struct coh901318_desc), GFP_NOWAIT); if (desc == NULL) goto out; INIT_LIST_HEAD(&desc->node); + dma_async_tx_descriptor_init(&desc->desc, &cohc->chan); } else { /* Reuse an old desc.
*/ desc = list_first_entry(&cohc->free, struct coh901318_desc, node); list_del(&desc->node); + /* Initialize it a bit so it's not insane */ + desc->sg = NULL; + desc->sg_len = 0; + desc->desc.callback = NULL; + desc->desc.callback_param = NULL; } out: @@ -885,6 +891,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, struct coh901318_chan *cohc = to_coh901318_chan(chan); int lli_len; u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last; + int ret; spin_lock_irqsave(&cohc->lock, flg); @@ -905,22 +912,19 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, if (data == NULL) goto err; - cohd = coh901318_desc_get(cohc); - cohd->sg = NULL; - cohd->sg_len = 0; - cohd->data = data; - - cohd->pending_irqs = - coh901318_lli_fill_memcpy( - &cohc->base->pool, data, src, size, dest, - cohc_chan_param(cohc)->ctrl_lli_chained, - ctrl_last); - cohd->flags = flags; + ret = coh901318_lli_fill_memcpy( + &cohc->base->pool, data, src, size, dest, + cohc_chan_param(cohc)->ctrl_lli_chained, + ctrl_last); + if (ret) + goto err; COH_DBG(coh901318_list_print(cohc, data)); - dma_async_tx_descriptor_init(&cohd->desc, chan); - + /* Pick a descriptor to handle this transfer */ + cohd = coh901318_desc_get(cohc); + cohd->data = data; + cohd->flags = flags; cohd->desc.tx_submit = coh901318_tx_submit; spin_unlock_irqrestore(&cohc->lock, flg); @@ -962,11 +966,6 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, /* Trigger interrupt after last lli */ ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE; - cohd = coh901318_desc_get(cohc); - cohd->sg = NULL; - cohd->sg_len = 0; - cohd->dir = direction; - if (direction == DMA_TO_DEVICE) { u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE | COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE; @@ -984,11 +983,6 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, } else goto err_direction; - dma_async_tx_descriptor_init(&cohd->desc, chan); - - cohd->desc.tx_submit = coh901318_tx_submit; - - /* The dma only supports transmitting packages up to * MAX_DMA_PACKET_SIZE. Calculate to total number of * dma elemts required to send the entire sg list @@ -1023,19 +1017,21 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ctrl, ctrl_last, direction, COH901318_CX_CTRL_TC_IRQ_ENABLE); - cohd->data = data; - - cohd->flags = flags; COH_DBG(coh901318_list_print(cohc, data)); + /* Pick a descriptor to handle this transfer */ + cohd = coh901318_desc_get(cohc); + cohd->dir = direction; + cohd->flags = flags; + cohd->desc.tx_submit = coh901318_tx_submit; + cohd->data = data; + spin_unlock_irqrestore(&cohc->lock, flg); return &cohd->desc; err_dma_alloc: err_direction: - coh901318_desc_remove(cohd); - coh901318_desc_free(cohc, cohd); spin_unlock_irqrestore(&cohc->lock, flg); out: return NULL; -- cgit v1.2.3-70-g09d2 From 0b58828c923e57f1bfbbd2c4277ceb60666314fa Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Tue, 2 Mar 2010 14:17:44 -0700 Subject: DMAENGINE: COH 901 318 remove irq counting This removes the pointless irq counting for the COH 901 318, as it turns out the hardware will only ever fire one IRQ for a linked list anyway. In the process, a previously missing spinlock was also added.
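The bookkeeping that replaces the per-descriptor IRQ counts is easier to follow out of context. A reduced sketch (hypothetical channel struct, not the driver's real one) of the irq/tasklet handshake the diff below converges on; the key point is that tasklet_schedule() on an already-scheduled tasklet is a no-op, so a bare "work pending" flag would lose events:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct chan {
	spinlock_t lock;
	unsigned long nbr_active_done;	/* IRQs seen but not yet processed */
	struct tasklet_struct tasklet;
};

static void retire_one_job(struct chan *c);	/* hypothetical helper */

/* hard irq: only count the completion and kick the tasklet */
static void chan_irq(struct chan *c)
{
	spin_lock(&c->lock);
	c->nbr_active_done++;
	spin_unlock(&c->lock);
	tasklet_schedule(&c->tasklet);	/* no-op if already scheduled */
}

/* tasklet: retire one job per run, reschedule while events remain */
static void chan_tasklet(unsigned long data)
{
	struct chan *c = (struct chan *)data;
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	retire_one_job(c);	/* the real driver drops the lock around the
				 * client callback; elided here */
	if (--c->nbr_active_done)
		tasklet_schedule(&c->tasklet);
	spin_unlock_irqrestore(&c->lock, flags);
}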
Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/coh901318.c | 76 ++++++++++++++++++--------------------------- drivers/dma/coh901318_lli.c | 13 ++------ 2 files changed, 34 insertions(+), 55 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index 12a7a151be6..544c46278f8 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -39,7 +39,6 @@ struct coh901318_desc { unsigned int sg_len; struct coh901318_lli *data; enum dma_data_direction dir; - int pending_irqs; unsigned long flags; }; @@ -72,7 +71,6 @@ struct coh901318_chan { unsigned long nbr_active_done; unsigned long busy; - int pending_irqs; struct coh901318_base *base; }; @@ -368,10 +366,6 @@ static void coh901318_desc_submit(struct coh901318_chan *cohc, struct coh901318_desc *desc) { list_add_tail(&desc->node, &cohc->active); - - BUG_ON(cohc->pending_irqs != 0); - - cohc->pending_irqs = desc->pending_irqs; } static struct coh901318_desc * @@ -617,36 +611,30 @@ static void dma_tasklet(unsigned long data) /* get first active descriptor entry from list */ cohd_fin = coh901318_first_active_get(cohc); - BUG_ON(cohd_fin->pending_irqs == 0); - if (cohd_fin == NULL) goto err; - cohd_fin->pending_irqs--; - cohc->completed = cohd_fin->desc.cookie; + /* locate callback to client */ + callback = cohd_fin->desc.callback; + callback_param = cohd_fin->desc.callback_param; - if (cohc->nbr_active_done == 0) - return; + /* sign this job as completed on the channel */ + cohc->completed = cohd_fin->desc.cookie; - if (!cohd_fin->pending_irqs) { - /* release the lli allocation*/ - coh901318_lli_free(&cohc->base->pool, &cohd_fin->data); - } + /* release the lli allocation and remove the descriptor */ + coh901318_lli_free(&cohc->base->pool, &cohd_fin->data); - dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d pending_irqs %d" - " nbr_active_done %ld\n", __func__, - cohc->id, cohc->pending_irqs, cohc->nbr_active_done); + /* return desc to free-list */ + coh901318_desc_remove(cohd_fin); + coh901318_desc_free(cohc, cohd_fin); - /* callback to client */ - callback = cohd_fin->desc.callback; - callback_param = cohd_fin->desc.callback_param; + spin_unlock_irqrestore(&cohc->lock, flags); - if (!cohd_fin->pending_irqs) { - coh901318_desc_remove(cohd_fin); + /* Call the callback when we're done */ + if (callback) + callback(callback_param); - /* return desc to free-list */ - coh901318_desc_free(cohc, cohd_fin); - } + spin_lock_irqsave(&cohc->lock, flags); /* * If another interrupt fired while the tasklet was scheduling, @@ -655,9 +643,7 @@ static void dma_tasklet(unsigned long data) * be handled for this channel. If there happen to be more than * one IRQ to be ack:ed, we simply schedule this tasklet again. 
*/ - if (cohc->nbr_active_done) - cohc->nbr_active_done--; - + cohc->nbr_active_done--; if (cohc->nbr_active_done) { dev_dbg(COHC_2_DEV(cohc), "scheduling tasklet again, new IRQs " "came in while we were scheduling this tasklet\n"); @@ -666,10 +652,8 @@ static void dma_tasklet(unsigned long data) else tasklet_schedule(&cohc->tasklet); } - spin_unlock_irqrestore(&cohc->lock, flags); - if (callback) - callback(callback_param); + spin_unlock_irqrestore(&cohc->lock, flags); return; @@ -688,16 +672,17 @@ static void dma_tc_handle(struct coh901318_chan *cohc) if (!cohc->allocated) return; - BUG_ON(cohc->pending_irqs == 0); + spin_lock(&cohc->lock); - cohc->pending_irqs--; cohc->nbr_active_done++; - if (cohc->pending_irqs == 0 && coh901318_queue_start(cohc) == NULL) + if (coh901318_queue_start(cohc) == NULL) cohc->busy = 0; BUG_ON(list_empty(&cohc->active)); + spin_unlock(&cohc->lock); + if (cohc_chan_conf(cohc)->priority_high) tasklet_hi_schedule(&cohc->tasklet); else @@ -951,6 +936,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, u32 ctrl = cohc_chan_param(cohc)->ctrl_lli; u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last; unsigned long flg; + int ret; if (!sgl) goto out; @@ -1010,13 +996,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, goto err_dma_alloc; /* initiate allocated data list */ - cohd->pending_irqs = - coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len, - cohc_dev_addr(cohc), - ctrl_chained, - ctrl, - ctrl_last, - direction, COH901318_CX_CTRL_TC_IRQ_ENABLE); + ret = coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len, + cohc_dev_addr(cohc), + ctrl_chained, + ctrl, + ctrl_last, + direction, COH901318_CX_CTRL_TC_IRQ_ENABLE); + if (ret) + goto err_lli_fill; COH_DBG(coh901318_list_print(cohc, data)); @@ -1030,6 +1017,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, spin_unlock_irqrestore(&cohc->lock, flg); return &cohd->desc; + err_lli_fill: err_dma_alloc: err_direction: spin_unlock_irqrestore(&cohc->lock, flg); @@ -1121,7 +1109,6 @@ coh901318_terminate_all(struct dma_chan *chan) cohc->nbr_active_done = 0; cohc->busy = 0; - cohc->pending_irqs = 0; spin_unlock_irqrestore(&cohc->lock, flags); } @@ -1148,7 +1135,6 @@ void coh901318_base_init(struct dma_device *dma, const int *pick_chans, spin_lock_init(&cohc->lock); - cohc->pending_irqs = 0; cohc->nbr_active_done = 0; cohc->busy = 0; INIT_LIST_HEAD(&cohc->free); diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c index f5120f238a4..5f9af1956ea 100644 --- a/drivers/dma/coh901318_lli.c +++ b/drivers/dma/coh901318_lli.c @@ -166,8 +166,7 @@ coh901318_lli_fill_memcpy(struct coh901318_pool *pool, lli->src_addr = src; lli->dst_addr = dst; - /* One irq per single transfer */ - return 1; + return 0; } int @@ -223,8 +222,7 @@ coh901318_lli_fill_single(struct coh901318_pool *pool, lli->src_addr = src; lli->dst_addr = dst; - /* One irq per single transfer */ - return 1; + return 0; } int @@ -240,7 +238,6 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool, u32 ctrl_sg; dma_addr_t src = 0; dma_addr_t dst = 0; - int nbr_of_irq = 0; u32 bytes_to_transfer; u32 elem_size; @@ -269,9 +266,6 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool, ctrl_sg = ctrl ? 
ctrl : ctrl_last; - if ((ctrl_sg & ctrl_irq_mask)) - nbr_of_irq++; - if (dir == DMA_TO_DEVICE) /* increment source address */ src = sg_dma_address(sg); else /* increment destination address */ dst = sg_dma_address(sg); bytes_to_transfer = sg_dma_len(sg); @@ -310,8 +304,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool, } spin_unlock(&pool->lock); - /* There can be many IRQs per sg transfer */ - return nbr_of_irq; + return 0; err: spin_unlock(&pool->lock); return -EINVAL; -- cgit v1.2.3-70-g09d2 From 516fd4305e5f5718475e81fe5c17c95888a8157b Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Tue, 2 Mar 2010 20:12:46 +0100 Subject: DMAENGINE: COH 901 318 configure channel direction This makes the COH 901 318 configure channel direction (to or from device) dynamically, instead of being passed in from the platform data. This was necessary in order to get the MMC/SD-card channel bidirectional (all other channels on the U300 were either RX or TX but this one was both). This also sets the memcpy() alignment to even 2^2 (32-bit) boundaries, which makes the memcpy() stress tests start working. Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/coh901318.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'drivers/dma') diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index 544c46278f8..1656fdcdb6c 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -928,6 +928,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, struct coh901318_chan *cohc = to_coh901318_chan(chan); struct coh901318_lli *data; struct coh901318_desc *cohd; + const struct coh901318_params *params; struct scatterlist *sg; int len = 0; int size; @@ -935,6 +936,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, u32 ctrl_chained = cohc_chan_param(cohc)->ctrl_lli_chained; u32 ctrl = cohc_chan_param(cohc)->ctrl_lli; u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last; + u32 config; unsigned long flg; int ret; @@ -952,10 +954,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, /* Trigger interrupt after last lli */ ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE; + params = cohc_chan_param(cohc); + config = params->config; + if (direction == DMA_TO_DEVICE) { u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE | COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE; + config |= COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY; ctrl_chained |= tx_flags; ctrl_last |= tx_flags; ctrl |= tx_flags; @@ -963,12 +969,15 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST | COH901318_CX_CTRL_DST_ADDR_INC_ENABLE; + config |= COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY; ctrl_chained |= rx_flags; ctrl_last |= rx_flags; ctrl |= rx_flags; } else goto err_direction; + coh901318_set_conf(cohc, config); + /* The dma only supports transmitting packages up to * MAX_DMA_PACKET_SIZE. Calculate to total number of * dma elemts required to send the entire sg list @@ -1250,6 +1259,11 @@ static int __init coh901318_probe(struct platform_device *pdev) base->dma_memcpy.device_issue_pending = coh901318_issue_pending; base->dma_memcpy.device_terminate_all = coh901318_terminate_all; base->dma_memcpy.dev = &pdev->dev; + /* + * This controller can only access address at even 32bit boundaries, + * i.e.
2^2 + */ + base->dma_memcpy.copy_align = 2; + err = dma_async_device_register(&base->dma_memcpy); if (err) -- cgit v1.2.3-70-g09d2 From 56a5d3cf21c71963c8fc506e9b9d3f71641d9c71 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Tue, 2 Mar 2010 20:12:56 +0100 Subject: DMAENGINE: COH 901 318 lli sg offset fix This makes the COH 901 318 respect the scatter offset field by using sg_phys() rather than sg_dma_address(), so we get a pointer to the actual data we want to send rather than the beginning of the buffer. Also initialize the LLIs a bit more thoroughly. Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/coh901318_lli.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c index 5f9af1956ea..71d58c1a1e8 100644 --- a/drivers/dma/coh901318_lli.c +++ b/drivers/dma/coh901318_lli.c @@ -74,6 +74,8 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len) lli = head; lli->phy_this = phy; + lli->link_addr = 0x00000000; + lli->virt_link_addr = 0x00000000U; for (i = 1; i < len; i++) { lli_prev = lli; @@ -85,13 +87,13 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len) DEBUGFS_POOL_COUNTER_ADD(pool, 1); lli->phy_this = phy; + lli->link_addr = 0x00000000; + lli->virt_link_addr = 0x00000000U; lli_prev->link_addr = phy; lli_prev->virt_link_addr = lli; } - lli->link_addr = 0x00000000U; - spin_unlock(&pool->lock); return head; @@ -268,10 +270,10 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool, if (dir == DMA_TO_DEVICE) /* increment source address */ - src = sg_dma_address(sg); + src = sg_phys(sg); else /* increment destination address */ - dst = sg_dma_address(sg); + dst = sg_phys(sg); bytes_to_transfer = sg_dma_len(sg); -- cgit v1.2.3-70-g09d2 From 773d9e2d8dbf02cfaf65786cf9100eef02c9fda4 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 3 Mar 2010 11:47:42 -0700 Subject: ioat3: cleanup, don't enable DCA completion writes We already disallow raid operations while DCA is globally enabled, so having it locally enabled is a nop and confusing when reading the code.
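Back on the sg offset fix above: the distinction it leans on is worth spelling out. sg_phys() folds the intra-page offset of the data into the page's physical address, while sg_dma_address() merely reads back the entry's ->dma_address field; per the commit message, on this platform the latter pointed at the beginning of the buffer rather than at the data. Illustrative copies below, with my_ prefixes to mark them as such (the bodies match the kernel's own definitions in <linux/scatterlist.h> as of this era):

#include <linux/scatterlist.h>

static inline dma_addr_t my_sg_phys(struct scatterlist *sg)
{
	/* page base plus the offset of the data within that page */
	return page_to_phys(sg_page(sg)) + sg->offset;
}

/* only meaningful after dma_map_sg() has filled ->dma_address */
#define my_sg_dma_address(sg)	((sg)->dma_address)

So for an sg entry describing data 60 bytes into a page, my_sg_phys() lands on the data itself; reading an unfilled ->dma_address does not.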
Signed-off-by: Dan Williams --- drivers/dma/ioat/dma_v3.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index 9908c9e94b2..f270fa11275 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -334,8 +334,7 @@ static void ioat3_cleanup_tasklet(unsigned long data) struct ioat2_dma_chan *ioat = (void *) data; ioat3_cleanup(ioat); - writew(IOAT_CHANCTRL_RUN | IOAT3_CHANCTRL_COMPL_DCA_EN, - ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); + writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); } static void ioat3_restart_channel(struct ioat2_dma_chan *ioat) -- cgit v1.2.3-70-g09d2 From b372ec2d900a5b50e47ef9e9624536ad146236be Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 3 Mar 2010 11:47:42 -0700 Subject: ioat3: use ioat2_quiesce() Replace the open-coded equivalent of ioat2_quiesce() in ioat3_restart_channel(). Signed-off-by: Dan Williams --- drivers/dma/ioat/dma_v3.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index f270fa11275..bff48e8cffc 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -341,16 +341,8 @@ static void ioat3_restart_channel(struct ioat2_dma_chan *ioat) { struct ioat_chan_common *chan = &ioat->base; unsigned long phys_complete; - u32 status; - - status = ioat_chansts(chan); - if (is_ioat_active(status) || is_ioat_idle(status)) - ioat_suspend(chan); - while (is_ioat_active(status) || is_ioat_idle(status)) { - status = ioat_chansts(chan); - cpu_relax(); - } + ioat2_quiesce(chan, 0); if (ioat_cleanup_preamble(chan, &phys_complete)) __cleanup(ioat, phys_complete); -- cgit v1.2.3-70-g09d2 From 281befa5592b0c5f9a3856b5666c62ac66d3d9ee Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 3 Mar 2010 11:47:43 -0700 Subject: ioat2: kill pending flag The pending == 2 case no longer exists in the driver, so we can use ioat2_ring_pending() outside the lock to determine if there might be any descriptors in the ring that the hardware has not seen.
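The unlocked test is the subtle part of the diff below: head and issued are each a single u16, so a reader outside the lock sees a stale but never torn snapshot. A reduced sketch of the pattern (hypothetical names; hw_kick() stands in for the dmacount doorbell write):

#include <linux/spinlock.h>
#include <linux/types.h>

struct ring_chan {
	spinlock_t ring_lock;
	u16 head;	/* next slot to allocate (producer index) */
	u16 issued;	/* last slot the hardware was told about */
};

static void hw_kick(struct ring_chan *rc);	/* hypothetical doorbell */

static inline u16 ring_pending(struct ring_chan *rc)
{
	return rc->head - rc->issued;	/* u16 wrap-around is harmless */
}

static void my_issue_pending(struct ring_chan *rc)
{
	/*
	 * Racy read on purpose: if we see a stale zero and skip, the
	 * submitter who raced with us observes its own descriptor as
	 * pending and is responsible for calling issue_pending itself.
	 */
	if (ring_pending(rc)) {
		spin_lock_bh(&rc->ring_lock);
		hw_kick(rc);
		spin_unlock_bh(&rc->ring_lock);
	}
}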
Signed-off-by: Dan Williams --- drivers/dma/ioat/dma_v2.c | 34 ++++++++++++---------------------- drivers/dma/ioat/dma_v2.h | 2 -- 2 files changed, 12 insertions(+), 24 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 5f7a500e18d..c6e4531fe52 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -51,48 +51,40 @@ MODULE_PARM_DESC(ioat_ring_max_alloc_order, void __ioat2_issue_pending(struct ioat2_dma_chan *ioat) { - void * __iomem reg_base = ioat->base.reg_base; + struct ioat_chan_common *chan = &ioat->base; - ioat->pending = 0; ioat->dmacount += ioat2_ring_pending(ioat); ioat->issued = ioat->head; /* make descriptor updates globally visible before notifying channel */ wmb(); - writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET); - dev_dbg(to_dev(&ioat->base), + writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); + dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x count: %#x\n", __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount); } -void ioat2_issue_pending(struct dma_chan *chan) +void ioat2_issue_pending(struct dma_chan *c) { - struct ioat2_dma_chan *ioat = to_ioat2_chan(chan); + struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - spin_lock_bh(&ioat->ring_lock); - if (ioat->pending == 1) + if (ioat2_ring_pending(ioat)) { + spin_lock_bh(&ioat->ring_lock); __ioat2_issue_pending(ioat); - spin_unlock_bh(&ioat->ring_lock); + spin_unlock_bh(&ioat->ring_lock); + } } /** * ioat2_update_pending - log pending descriptors * @ioat: ioat2+ channel * - * set pending to '1' unless pending is already set to '2', pending == 2 - * indicates that submission is temporarily blocked due to an in-flight - * reset. If we are already above the ioat_pending_level threshold then - * just issue pending. - * - * called with ring_lock held + * Check if the number of unsubmitted descriptors has exceeded the + * watermark. 
Called with ring_lock held */ static void ioat2_update_pending(struct ioat2_dma_chan *ioat) { - if (unlikely(ioat->pending == 2)) - return; - else if (ioat2_ring_pending(ioat) > ioat_pending_level) + if (ioat2_ring_pending(ioat) > ioat_pending_level) __ioat2_issue_pending(ioat); - else - ioat->pending = 1; } static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat) @@ -546,7 +538,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c) ioat->head = 0; ioat->issued = 0; ioat->tail = 0; - ioat->pending = 0; ioat->alloc_order = order; spin_unlock_bh(&ioat->ring_lock); @@ -815,7 +806,6 @@ void ioat2_free_chan_resources(struct dma_chan *c) chan->last_completion = 0; chan->completion_dma = 0; - ioat->pending = 0; ioat->dmacount = 0; } diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h index 3afad8da43c..d211335b48f 100644 --- a/drivers/dma/ioat/dma_v2.h +++ b/drivers/dma/ioat/dma_v2.h @@ -47,7 +47,6 @@ extern int ioat_ring_alloc_order; * @head: allocated index * @issued: hardware notification point * @tail: cleanup index - * @pending: lock free indicator for issued != head * @dmacount: identical to 'head' except for occasionally resetting to zero * @alloc_order: log2 of the number of allocated descriptors * @ring: software ring buffer implementation of hardware ring @@ -61,7 +60,6 @@ struct ioat2_dma_chan { u16 tail; u16 dmacount; u16 alloc_order; - int pending; struct ioat_ring_ent **ring; spinlock_t ring_lock; }; -- cgit v1.2.3-70-g09d2 From aa75db0080603bae27961c0502812dfd0f522bb3 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 3 Mar 2010 21:21:10 -0700 Subject: ioat: close potential BUG_ON race in the descriptor cleanup path Since ioat_cleanup_preamble() and the update of the last completed descriptor are not synchronized there is a chance that two cleanup threads can see descriptors to clean. If the first cleans up all pending descriptors then the second will trigger the BUG_ON. Signed-off-by: Dan Williams --- drivers/dma/ioat/dma_v2.c | 2 +- drivers/dma/ioat/dma_v3.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index c6e4531fe52..01ed1cfd3eb 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -158,7 +158,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) seen_current = true; } ioat->tail += i; - BUG_ON(!seen_current); /* no active descs have written a completion? */ + BUG_ON(active && !seen_current); /* no active descs have written a completion? */ chan->last_completion = phys_complete; if (ioat->head == ioat->tail) { diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index bff48e8cffc..39520f2f7da 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -293,7 +293,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) } } ioat->tail += i; - BUG_ON(!seen_current); /* no active descs have written a completion? */ + BUG_ON(active && !seen_current); /* no active descs have written a completion? */ chan->last_completion = phys_complete; if (ioat->head == ioat->tail) { dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", -- cgit v1.2.3-70-g09d2 From b9cc98697d1ca35a86bbb708acc6d93993c28f0f Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 3 Mar 2010 21:21:13 -0700 Subject: ioat3: interrupt coalescing The hardware automatically disables further interrupts after each event until rearmed. 
This allows a delay to be injected between the occurrence of the interrupt and the running of the cleanup routine. The delay is scaled by the descriptor backlog and then written to the INTRDELAY register, which specifies the number of microseconds to hold off interrupt delivery after an interrupt event occurs. According to powertop this reduces the interrupt rate from ~5000 intr/s to ~150 intr/s without affecting throughput (simple dd to a raid6 array). Signed-off-by: Dan Williams --- drivers/dma/ioat/dma_v3.c | 37 +++++++++++++++++++++++++++++++++---- drivers/dma/ioat/registers.h | 2 +- 2 files changed, 34 insertions(+), 5 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index 39520f2f7da..9988f134018 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -295,15 +295,23 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) ioat->tail += i; BUG_ON(active && !seen_current); /* no active descs have written a completion? */ chan->last_completion = phys_complete; - if (ioat->head == ioat->tail) { + + active = ioat2_ring_active(ioat); + if (active == 0) { dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", __func__); clear_bit(IOAT_COMPLETION_PENDING, &chan->state); mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); } + /* 5 microsecond delay per pending descriptor */ + writew(min((5 * active), IOAT_INTRDELAY_MASK), + chan->device->reg_base + IOAT_INTRDELAY_OFFSET); } -static void ioat3_cleanup(struct ioat2_dma_chan *ioat) +/* try to cleanup, but yield (via spin_trylock) to incoming submissions + * with the expectation that we will immediately poll again shortly + */ +static void ioat3_cleanup_poll(struct ioat2_dma_chan *ioat) { struct ioat_chan_common *chan = &ioat->base; unsigned long phys_complete; @@ -329,11 +337,32 @@ static void ioat3_cleanup(struct ioat2_dma_chan *ioat) spin_unlock_bh(&chan->cleanup_lock); } +/* run cleanup now because we already delayed the interrupt via INTRDELAY */ +static void ioat3_cleanup_sync(struct ioat2_dma_chan *ioat) +{ + struct ioat_chan_common *chan = &ioat->base; + unsigned long phys_complete; + + prefetch(chan->completion); + + spin_lock_bh(&chan->cleanup_lock); + if (!ioat_cleanup_preamble(chan, &phys_complete)) { + spin_unlock_bh(&chan->cleanup_lock); + return; + } + spin_lock_bh(&ioat->ring_lock); + + __cleanup(ioat, phys_complete); + + spin_unlock_bh(&ioat->ring_lock); + spin_unlock_bh(&chan->cleanup_lock); +} + static void ioat3_cleanup_tasklet(unsigned long data) { struct ioat2_dma_chan *ioat = (void *) data; - ioat3_cleanup(ioat); + ioat3_cleanup_sync(ioat); writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); } @@ -417,7 +446,7 @@ ioat3_is_complete(struct dma_chan *c, dma_cookie_t cookie, if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) return DMA_SUCCESS; - ioat3_cleanup(ioat); + ioat3_cleanup_poll(ioat); return ioat_is_complete(c, cookie, done, used); } diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h index e8ae63baf58..1391798542b 100644 --- a/drivers/dma/ioat/registers.h +++ b/drivers/dma/ioat/registers.h @@ -60,7 +60,7 @@ #define IOAT_PERPORTOFFSET_OFFSET 0x0A /* 16-bit */ #define IOAT_INTRDELAY_OFFSET 0x0C /* 16-bit */ -#define IOAT_INTRDELAY_INT_DELAY_MASK 0x3FFF /* Interrupt Delay Time */ +#define IOAT_INTRDELAY_MASK 0x3FFF /* Interrupt Delay Time */ #define IOAT_INTRDELAY_COALESE_SUPPORT 0x8000 /* Interrupt Coalescing Supported */ #define IOAT_DEVICE_STATUS_OFFSET 0x0E /*
16-bit */ -- cgit v1.2.3-70-g09d2 From aa4d72ae946a4fa40486b871717778734184fa29 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 3 Mar 2010 21:21:13 -0700 Subject: ioat: cleanup ->timer_fn() and ->cleanup_fn() prototypes If the calling conventions of ->timer_fn() and ->cleanup_fn() are unified across hardware versions, we can drop parameters to ioat_init_channel() and unify the ioat_is_dma_complete() implementations. Both ->timer_fn() and ->cleanup_fn() are modified to expect a struct dma_chan pointer. Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 46 ++++++++++++++++++++-------------------------- drivers/dma/ioat/dma.h | 11 +++++------ drivers/dma/ioat/dma_v2.c | 34 ++++++++--------------------------- drivers/dma/ioat/dma_v2.h | 4 +--- drivers/dma/ioat/dma_v3.c | 12 ++++++------ 5 files changed, 40 insertions(+), 67 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index dcc4ab78b32..5d0e42b263d 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -94,16 +94,12 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data) return IRQ_HANDLED; } -static void ioat1_cleanup_tasklet(unsigned long data); - /* common channel initialization */ -void ioat_init_channel(struct ioatdma_device *device, - struct ioat_chan_common *chan, int idx, - void (*timer_fn)(unsigned long), - void (*tasklet)(unsigned long), - unsigned long ioat) +void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx) { struct dma_device *dma = &device->common; + struct dma_chan *c = &chan->common; + unsigned long data = (unsigned long) c; chan->device = device; chan->reg_base = device->reg_base + (0x80 * (idx + 1)); @@ -112,14 +108,12 @@ void ioat_init_channel(struct ioatdma_device *device, list_add_tail(&chan->common.device_node, &dma->channels); device->idx[idx] = chan; init_timer(&chan->timer); - chan->timer.function = timer_fn; - chan->timer.data = ioat; - tasklet_init(&chan->cleanup_task, tasklet, ioat); + chan->timer.function = device->timer_fn; + chan->timer.data = data; + tasklet_init(&chan->cleanup_task, device->cleanup_fn, data); tasklet_disable(&chan->cleanup_task); } -static void ioat1_timer_event(unsigned long data); - /** * ioat1_dma_enumerate_channels - find and initialize the device's channels * @device: the device to be enumerated */ @@ -155,10 +149,7 @@ static int ioat1_enumerate_channels(struct ioatdma_device *device) if (!ioat) break; - ioat_init_channel(device, &ioat->base, i, - ioat1_timer_event, - ioat1_cleanup_tasklet, - (unsigned long) ioat); + ioat_init_channel(device, &ioat->base, i); ioat->xfercap = xfercap; spin_lock_init(&ioat->desc_lock); INIT_LIST_HEAD(&ioat->free_desc); @@ -532,12 +523,12 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, return &desc->txd; } -static void ioat1_cleanup_tasklet(unsigned long data) +static void ioat1_cleanup_event(unsigned long data) { - struct ioat_dma_chan *chan = (void *)data; + struct ioat_dma_chan *ioat = to_ioat_chan((void *) data); - ioat1_cleanup(chan); - writew(IOAT_CHANCTRL_RUN, chan->base.reg_base + IOAT_CHANCTRL_OFFSET); + ioat1_cleanup(ioat); + writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); } void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, @@ -687,7 +678,7 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat) static void ioat1_timer_event(unsigned long data) { - struct ioat_dma_chan *ioat = (void *) data; + struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base; dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state); @@ -734,16 +725,17 @@ static void ioat1_timer_event(unsigned long data) spin_unlock_bh(&chan->cleanup_lock); } -static enum dma_status -ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie, +enum dma_status +ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used) { - struct ioat_dma_chan *ioat = to_ioat_chan(c); + struct ioat_chan_common *chan = to_chan_common(c); + struct ioatdma_device *device = chan->device; if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) return DMA_SUCCESS; - ioat1_cleanup(ioat); + device->cleanup_fn((unsigned long) c); return ioat_is_complete(c, cookie, done, used); } @@ -1199,12 +1191,14 @@ int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca) device->intr_quirk = ioat1_intr_quirk; device->enumerate_channels = ioat1_enumerate_channels; device->self_test = ioat_dma_self_test; + device->timer_fn = ioat1_timer_event; + device->cleanup_fn = ioat1_cleanup_event; dma = &device->common; dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy; dma->device_issue_pending = ioat1_dma_memcpy_issue_pending; dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources; dma->device_free_chan_resources = ioat1_dma_free_chan_resources; - dma->device_is_tx_complete = ioat1_dma_is_complete; + dma->device_is_tx_complete = ioat_is_dma_complete; err = ioat_probe(device); if (err) diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index bbc3e78ef33..4f747a25407 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -61,7 +61,7 @@ * @intr_quirk: interrupt setup quirk (for ioat_v1 devices) * @enumerate_channels: hw version specific channel enumeration * @reset_hw: hw version specific channel (re)initialization - * @cleanup_tasklet: select between the v2 and v3 cleanup routines + * @cleanup_fn: select between the v2 and v3 cleanup routines * @timer_fn: select between the v2 and v3 timer watchdog routines * @self_test: hardware version specific self test for each supported op type * @@ -80,7 +80,7 @@ struct ioatdma_device { void (*intr_quirk)(struct ioatdma_device *device); int (*enumerate_channels)(struct ioatdma_device *device); int (*reset_hw)(struct ioat_chan_common *chan); - void (*cleanup_tasklet)(unsigned long data); + void (*cleanup_fn)(unsigned long data); void (*timer_fn)(unsigned long data); int (*self_test)(struct ioatdma_device *device); }; @@ -337,10 +337,9 @@ struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); unsigned long ioat_get_current_completion(struct ioat_chan_common *chan); void ioat_init_channel(struct ioatdma_device *device, - struct ioat_chan_common *chan, int idx, - void (*timer_fn)(unsigned long), - void (*tasklet)(unsigned long), - unsigned long ioat); + struct ioat_chan_common *chan, int idx); +enum dma_status ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie, + dma_cookie_t *done, dma_cookie_t *used); void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, size_t len, struct ioat_dma_descriptor *hw); bool ioat_cleanup_preamble(struct ioat_chan_common *chan, diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 01ed1cfd3eb..25a3c72b294 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -199,9 +199,9 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat) spin_unlock_bh(&chan->cleanup_lock); } -void ioat2_cleanup_tasklet(unsigned long 
data) +void ioat2_cleanup_event(unsigned long data) { - struct ioat2_dma_chan *ioat = (void *) data; + struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); ioat2_cleanup(ioat); writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); @@ -283,7 +283,7 @@ static void ioat2_restart_channel(struct ioat2_dma_chan *ioat) void ioat2_timer_event(unsigned long data) { - struct ioat2_dma_chan *ioat = (void *) data; + struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); struct ioat_chan_common *chan = &ioat->base; spin_lock_bh(&chan->cleanup_lock); @@ -389,10 +389,7 @@ int ioat2_enumerate_channels(struct ioatdma_device *device) if (!ioat) break; - ioat_init_channel(device, &ioat->base, i, - device->timer_fn, - device->cleanup_tasklet, - (unsigned long) ioat); + ioat_init_channel(device, &ioat->base, i); ioat->xfercap_log = xfercap_log; spin_lock_init(&ioat->ring_lock); if (device->reset_hw(&ioat->base)) { @@ -692,7 +689,7 @@ int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs) mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); spin_unlock_bh(&chan->cleanup_lock); - device->timer_fn((unsigned long) ioat); + device->timer_fn((unsigned long) &chan->common); } else spin_unlock_bh(&chan->cleanup_lock); return -ENOMEM; @@ -776,7 +773,7 @@ void ioat2_free_chan_resources(struct dma_chan *c) tasklet_disable(&chan->cleanup_task); del_timer_sync(&chan->timer); - device->cleanup_tasklet((unsigned long) ioat); + device->cleanup_fn((unsigned long) c); device->reset_hw(chan); spin_lock_bh(&ioat->ring_lock); @@ -809,21 +806,6 @@ void ioat2_free_chan_resources(struct dma_chan *c) ioat->dmacount = 0; } -enum dma_status -ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie, - dma_cookie_t *done, dma_cookie_t *used) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - struct ioatdma_device *device = ioat->base.device; - - if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) - return DMA_SUCCESS; - - device->cleanup_tasklet((unsigned long) ioat); - - return ioat_is_complete(c, cookie, done, used); -} - static ssize_t ring_size_show(struct dma_chan *c, char *page) { struct ioat2_dma_chan *ioat = to_ioat2_chan(c); @@ -864,7 +846,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca) device->enumerate_channels = ioat2_enumerate_channels; device->reset_hw = ioat2_reset_hw; - device->cleanup_tasklet = ioat2_cleanup_tasklet; + device->cleanup_fn = ioat2_cleanup_event; device->timer_fn = ioat2_timer_event; device->self_test = ioat_dma_self_test; dma = &device->common; @@ -872,7 +854,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca) dma->device_issue_pending = ioat2_issue_pending; dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; dma->device_free_chan_resources = ioat2_free_chan_resources; - dma->device_is_tx_complete = ioat2_is_complete; + dma->device_is_tx_complete = ioat_is_dma_complete; err = ioat_probe(device); if (err) diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h index d211335b48f..ef2871fd786 100644 --- a/drivers/dma/ioat/dma_v2.h +++ b/drivers/dma/ioat/dma_v2.h @@ -176,12 +176,10 @@ ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, void ioat2_issue_pending(struct dma_chan *chan); int ioat2_alloc_chan_resources(struct dma_chan *c); void ioat2_free_chan_resources(struct dma_chan *c); -enum dma_status ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie, - dma_cookie_t *done, dma_cookie_t *used); void __ioat2_restart_chan(struct ioat2_dma_chan *ioat); 
bool reshape_ring(struct ioat2_dma_chan *ioat, int order); void __ioat2_issue_pending(struct ioat2_dma_chan *ioat); -void ioat2_cleanup_tasklet(unsigned long data); +void ioat2_cleanup_event(unsigned long data); void ioat2_timer_event(unsigned long data); int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo); int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo); diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index 9988f134018..26febc56dab 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -358,9 +358,9 @@ static void ioat3_cleanup_sync(struct ioat2_dma_chan *ioat) spin_unlock_bh(&chan->cleanup_lock); } -static void ioat3_cleanup_tasklet(unsigned long data) +static void ioat3_cleanup_event(unsigned long data) { - struct ioat2_dma_chan *ioat = (void *) data; + struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); ioat3_cleanup_sync(ioat); writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); @@ -380,7 +380,7 @@ static void ioat3_restart_channel(struct ioat2_dma_chan *ioat) static void ioat3_timer_event(unsigned long data) { - struct ioat2_dma_chan *ioat = (void *) data; + struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); struct ioat_chan_common *chan = &ioat->base; spin_lock_bh(&chan->cleanup_lock); @@ -1259,11 +1259,11 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) if (is_raid_device) { dma->device_is_tx_complete = ioat3_is_complete; - device->cleanup_tasklet = ioat3_cleanup_tasklet; + device->cleanup_fn = ioat3_cleanup_event; device->timer_fn = ioat3_timer_event; } else { - dma->device_is_tx_complete = ioat2_is_complete; - device->cleanup_tasklet = ioat2_cleanup_tasklet; + dma->device_is_tx_complete = ioat_is_dma_complete; + device->cleanup_fn = ioat2_cleanup_event; device->timer_fn = ioat2_timer_event; } -- cgit v1.2.3-70-g09d2
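The payoff of the unified convention is that version-agnostic code can invoke either implementation through the device ops, exactly as ioat_is_dma_complete() now does with device->cleanup_fn((unsigned long) c). A stand-alone sketch of the pattern, with simplified stand-in types rather than the driver's own:

	#include <stddef.h>

	struct dma_chan { int id; };			/* generic channel */

	struct chan_v2 {				/* hw-specific channel */
		struct dma_chan common;			/* generic part embedded */
		int ring_order;
	};

	struct device_ops {				/* per-hw-version callbacks */
		void (*cleanup_fn)(unsigned long data);
		void (*timer_fn)(unsigned long data);
	};

	/* container_of() in miniature: recover the private channel from
	 * the generic one that was smuggled through 'unsigned long data' */
	static struct chan_v2 *to_chan_v2(struct dma_chan *c)
	{
		return (struct chan_v2 *)((char *)c - offsetof(struct chan_v2, common));
	}

	static void v2_cleanup_event(unsigned long data)
	{
		struct chan_v2 *ch = to_chan_v2((struct dma_chan *)data);
		(void)ch;				/* hw-v2 cleanup would run here */
	}

	/* version-agnostic caller: needs only the ops table and the
	 * generic channel, never the hw-specific type */
	static void kick_cleanup(const struct device_ops *ops, struct dma_chan *c)
	{
		ops->cleanup_fn((unsigned long)c);
	}

Under the old scheme the tasklet data was the hw-specific pointer, so every call site had to know which hardware version it was driving; passing the generic channel instead pushes that knowledge down into the callbacks themselves.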
Miller" Cc: Russell King Cc: David Woodhouse Cc: Artem Bityutskiy Cc: Stephen Rothwell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/kernel/cpu/perf_event.c | 2 +- arch/x86/kernel/cpu/perf_event_intel.c | 2 +- drivers/dma/ioat/dma.c | 2 +- drivers/gpio/pl061.c | 2 +- drivers/gpio/timbgpio.c | 2 +- drivers/i2c/busses/i2c-designware.c | 4 ++-- drivers/mfd/htc-egpio.c | 2 +- drivers/misc/sgi-xp/xpnet.c | 2 +- drivers/net/gianfar.c | 12 ++++++------ drivers/net/ixgbe/ixgbe_main.c | 2 +- drivers/net/ixgbevf/ixgbevf_main.c | 2 +- drivers/net/wireless/ath/ar9170/main.c | 2 +- drivers/net/wireless/iwmc3200wifi/debugfs.c | 2 +- drivers/net/wireless/iwmc3200wifi/rx.c | 2 +- fs/ocfs2/quota_local.c | 2 +- include/linux/bitops.h | 4 +++- kernel/sched_cpupri.c | 2 +- sound/soc/codecs/uda1380.c | 2 +- 18 files changed, 26 insertions(+), 24 deletions(-) (limited to 'drivers/dma') diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 641ccb9dddb..b1fbdeecf6c 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -676,7 +676,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) if (c->weight != w) continue; - for_each_bit(j, c->idxmsk, X86_PMC_IDX_MAX) { + for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) { if (!test_bit(j, used_mask)) break; } diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index cf6590cf4a5..977e7544738 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -757,7 +757,7 @@ again: inc_irq_stat(apic_perf_irqs); ack = status; - for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { + for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { struct perf_event *event = cpuc->events[bit]; clear_bit(bit, (unsigned long *) &status); diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 5d0e42b263d..af14c9a5b8d 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -71,7 +71,7 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data) } attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET); - for_each_bit(bit, &attnstatus, BITS_PER_LONG) { + for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) { chan = ioat_chan_by_index(instance, bit); tasklet_schedule(&chan->cleanup_task); } diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c index 4ee4c8367a3..3ad1eeb4960 100644 --- a/drivers/gpio/pl061.c +++ b/drivers/gpio/pl061.c @@ -219,7 +219,7 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc) if (pending == 0) continue; - for_each_bit(offset, &pending, PL061_GPIO_NR) + for_each_set_bit(offset, &pending, PL061_GPIO_NR) generic_handle_irq(pl061_to_irq(&chip->gc, offset)); } desc->chip->unmask(irq); diff --git a/drivers/gpio/timbgpio.c b/drivers/gpio/timbgpio.c index d941f45fe55..4ecba6e5a32 100644 --- a/drivers/gpio/timbgpio.c +++ b/drivers/gpio/timbgpio.c @@ -175,7 +175,7 @@ static void timbgpio_irq(unsigned int irq, struct irq_desc *desc) ipr = ioread32(tgpio->membase + TGPIO_IPR); iowrite32(ipr, tgpio->membase + TGPIO_ICR); - for_each_bit(offset, &ipr, tgpio->gpio.ngpio) + for_each_set_bit(offset, &ipr, tgpio->gpio.ngpio) generic_handle_irq(timbgpio_to_irq(&tgpio->gpio, offset)); } diff --git a/drivers/i2c/busses/i2c-designware.c b/drivers/i2c/busses/i2c-designware.c index 9e18ef97f15..3e72b69aa7f 100644 --- a/drivers/i2c/busses/i2c-designware.c +++ b/drivers/i2c/busses/i2c-designware.c @@ -497,13 +497,13 @@ static int 
i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev) int i; if (abort_source & DW_IC_TX_ABRT_NOACK) { - for_each_bit(i, &abort_source, ARRAY_SIZE(abort_sources)) + for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources)) dev_dbg(dev->dev, "%s: %s\n", __func__, abort_sources[i]); return -EREMOTEIO; } - for_each_bit(i, &abort_source, ARRAY_SIZE(abort_sources)) + for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources)) dev_err(dev->dev, "%s: %s\n", __func__, abort_sources[i]); if (abort_source & DW_IC_TX_ARB_LOST) diff --git a/drivers/mfd/htc-egpio.c b/drivers/mfd/htc-egpio.c index aa266e1f69b..addb846c1e3 100644 --- a/drivers/mfd/htc-egpio.c +++ b/drivers/mfd/htc-egpio.c @@ -108,7 +108,7 @@ static void egpio_handler(unsigned int irq, struct irq_desc *desc) ack_irqs(ei); /* Process all set pins. */ readval &= ei->irqs_enabled; - for_each_bit(irqpin, &readval, ei->nirqs) { + for_each_set_bit(irqpin, &readval, ei->nirqs) { /* Run irq handler */ pr_debug("got IRQ %d\n", irqpin); irq = ei->irq_start + irqpin; diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c index 16f0abda142..57b152f8d1b 100644 --- a/drivers/misc/sgi-xp/xpnet.c +++ b/drivers/misc/sgi-xp/xpnet.c @@ -475,7 +475,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) if (skb->data[0] == 0xff) { /* we are being asked to broadcast to all partitions */ - for_each_bit(dest_partid, xpnet_broadcast_partitions, + for_each_set_bit(dest_partid, xpnet_broadcast_partitions, xp_max_npartitions) { xpnet_send(skb, queued_msg, start_addr, end_addr, diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 6aa526ee909..61a7b4351e7 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -998,7 +998,7 @@ static int gfar_probe(struct of_device *ofdev, } /* Need to reverse the bit maps as bit_map's MSB is q0 * but, for_each_set_bit parses from right to left, which * basically reverses the queue numbers */ for (i = 0; i< priv->num_grps; i++) { priv->gfargrp[i].tx_bit_map = reverse_bitmap( @@ -1011,7 +1011,7 @@ static int gfar_probe(struct of_device *ofdev, * also assign queues to groups */ for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) { priv->gfargrp[grp_idx].num_rx_queues = 0x0; - for_each_bit(i, &priv->gfargrp[grp_idx].rx_bit_map, + for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map, priv->num_rx_queues) { priv->gfargrp[grp_idx].num_rx_queues++; priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx]; @@ -1019,7 +1019,7 @@ static int gfar_probe(struct of_device *ofdev, rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i); } priv->gfargrp[grp_idx].num_tx_queues = 0x0; - for_each_bit (i, &priv->gfargrp[grp_idx].tx_bit_map, + for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map, priv->num_tx_queues) { priv->gfargrp[grp_idx].num_tx_queues++; priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx]; @@ -1709,7 +1709,7 @@ void gfar_configure_coalescing(struct gfar_private *priv, if (priv->mode == MQ_MG_MODE) { baddr = &regs->txic0; - for_each_bit (i, &tx_mask, priv->num_tx_queues) { + for_each_set_bit(i, &tx_mask, priv->num_tx_queues) { if (likely(priv->tx_queue[i]->txcoalescing)) { gfar_write(baddr + i, 0); gfar_write(baddr + i, priv->tx_queue[i]->txic); @@ -1717,7 +1717,7 @@ void gfar_configure_coalescing(struct gfar_private *priv, } baddr = &regs->rxic0; - for_each_bit (i, &rx_mask, priv->num_rx_queues) { + for_each_set_bit(i, &rx_mask, priv->num_rx_queues) { if (likely(priv->rx_queue[i]->rxcoalescing)) { gfar_write(baddr + i, 0);
gfar_write(baddr + i, priv->rx_queue[i]->rxic); @@ -2607,7 +2607,7 @@ static int gfar_poll(struct napi_struct *napi, int budget) budget_per_queue = left_over_budget/num_queues; left_over_budget = 0; - for_each_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { + for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { if (test_bit(i, &serviced_queues)) continue; rx_queue = priv->rx_queue[i]; diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 45e3532b166..684af371462 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -1050,7 +1050,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) */ for (v_idx = 0; v_idx < q_vectors; v_idx++) { q_vector = adapter->q_vector[v_idx]; - /* XXX for_each_bit(...) */ + /* XXX for_each_set_bit(...) */ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c index 235b5fd4b8d..ca653c49b76 100644 --- a/drivers/net/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ixgbevf/ixgbevf_main.c @@ -751,7 +751,7 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) */ for (v_idx = 0; v_idx < q_vectors; v_idx++) { q_vector = adapter->q_vector[v_idx]; - /* XXX for_each_bit(...) */ + /* XXX for_each_set_bit(...) */ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c index 8a964f13036..a6452af9c6c 100644 --- a/drivers/net/wireless/ath/ar9170/main.c +++ b/drivers/net/wireless/ath/ar9170/main.c @@ -394,7 +394,7 @@ static void ar9170_tx_fake_ampdu_status(struct ar9170 *ar) ieee80211_tx_status_irqsafe(ar->hw, skb); } - for_each_bit(i, &queue_bitmap, BITS_PER_BYTE) { + for_each_set_bit(i, &queue_bitmap, BITS_PER_BYTE) { #ifdef AR9170_QUEUE_STOP_DEBUG printk(KERN_DEBUG "%s: wake queue %d\n", wiphy_name(ar->hw->wiphy), i); diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c index be992ca41cf..c29c994de0e 100644 --- a/drivers/net/wireless/iwmc3200wifi/debugfs.c +++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c @@ -89,7 +89,7 @@ static int iwm_debugfs_dbg_modules_write(void *data, u64 val) for (i = 0; i < __IWM_DM_NR; i++) iwm->dbg.dbg_module[i] = 0; - for_each_bit(bit, &iwm->dbg.dbg_modules, __IWM_DM_NR) + for_each_set_bit(bit, &iwm->dbg.dbg_modules, __IWM_DM_NR) iwm->dbg.dbg_module[bit] = iwm->dbg.dbg_level; return 0; diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c index ad8f7eabb5a..8456b4dbd14 100644 --- a/drivers/net/wireless/iwmc3200wifi/rx.c +++ b/drivers/net/wireless/iwmc3200wifi/rx.c @@ -1116,7 +1116,7 @@ static int iwm_ntf_stop_resume_tx(struct iwm_priv *iwm, u8 *buf, return -EINVAL; } - for_each_bit(bit, (unsigned long *)&tid_msk, IWM_UMAC_TID_NR) { + for_each_set_bit(bit, (unsigned long *)&tid_msk, IWM_UMAC_TID_NR) { tid_info = &sta_info->tid_info[bit]; mutex_lock(&tid_info->mutex); diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c index 21f9e71223c..a6467f3d262 100644 --- a/fs/ocfs2/quota_local.c +++ b/fs/ocfs2/quota_local.c @@ -457,7 +457,7 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode, break; } dchunk = (struct ocfs2_local_disk_chunk *)hbh->b_data; - for_each_bit(bit, rchunk->rc_bitmap, ol_chunk_entries(sb)) { + for_each_set_bit(bit, rchunk->rc_bitmap, ol_chunk_entries(sb)) { qbh = NULL; status = ocfs2_read_quota_block(lqinode, ol_dqblk_block(sb, 
chunk, bit), diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 25b8b2f33ae..b7938987923 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -16,11 +16,13 @@ */ #include -#define for_each_bit(bit, addr, size) \ +#define for_each_set_bit(bit, addr, size) \ for ((bit) = find_first_bit((addr), (size)); \ (bit) < (size); \ (bit) = find_next_bit((addr), (size), (bit) + 1)) +/* Temporary */ +#define for_each_bit(bit, addr, size) for_each_set_bit(bit, addr, size) static __inline__ int get_bitmask_order(unsigned int count) { diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c index eeb3506c483..82095bf2099 100644 --- a/kernel/sched_cpupri.c +++ b/kernel/sched_cpupri.c @@ -47,7 +47,7 @@ static int convert_prio(int prio) } #define for_each_cpupri_active(array, idx) \ - for_each_bit(idx, array, CPUPRI_NR_PRIORITIES) + for_each_set_bit(idx, array, CPUPRI_NR_PRIORITIES) /** * cpupri_find - find the best (lowest-pri) CPU in the system diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c index a2763c2e734..9cd0a66b766 100644 --- a/sound/soc/codecs/uda1380.c +++ b/sound/soc/codecs/uda1380.c @@ -137,7 +137,7 @@ static void uda1380_flush_work(struct work_struct *work) { int bit, reg; - for_each_bit(bit, &uda1380_cache_dirty, UDA1380_CACHEREGNUM - 0x10) { + for_each_set_bit(bit, &uda1380_cache_dirty, UDA1380_CACHEREGNUM - 0x10) { reg = 0x10 + bit; pr_debug("uda1380: flush reg %x val %x:\n", reg, uda1380_read_reg_cache(uda1380_codec, reg)); -- cgit v1.2.3-70-g09d2
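For reference, the renamed iterator is used exactly like the old one. A small hypothetical example (the status word and handle_source() are illustrative, not from this patch):

	unsigned long status = 0xa5;	/* pretend pending-source bitmap */
	int bit;

	/* visits bits 0, 2, 5 and 7 only; the loop body never sees a
	 * cleared bit, so no test_bit() check is needed inside it */
	for_each_set_bit(bit, &status, BITS_PER_LONG)
		handle_source(bit);

And because of the temporary #define, existing for_each_bit() users keep compiling until they are converted.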