Diffstat (limited to 'drivers/dma/ste_dma40.c')
-rw-r--r-- | drivers/dma/ste_dma40.c | 860
1 file changed, 532 insertions, 328 deletions
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index c426829f6ab..17e2600a00c 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -30,14 +30,16 @@ /* Maximum iterations taken before giving up suspending a channel */ #define D40_SUSPEND_MAX_IT 500 +/* Hardware requirement on LCLA alignment */ +#define LCLA_ALIGNMENT 0x40000 +/* Attempts before giving up to trying to get pages that are aligned */ +#define MAX_LCLA_ALLOC_ATTEMPTS 256 + +/* Bit markings for allocation map */ #define D40_ALLOC_FREE (1 << 31) #define D40_ALLOC_PHY (1 << 30) #define D40_ALLOC_LOG_FREE 0 -/* The number of free d40_desc to keep in memory before starting - * to kfree() them */ -#define D40_DESC_CACHE_SIZE 50 - /* Hardware designer of the block */ #define D40_PERIPHID2_DESIGNER 0x8 @@ -68,9 +70,9 @@ enum d40_command { */ struct d40_lli_pool { void *base; - int size; + int size; /* Space for dst and src, plus an extra for padding */ - u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)]; + u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)]; }; /** @@ -81,9 +83,10 @@ struct d40_lli_pool { * lli_len equals one. * @lli_log: Same as above but for logical channels. * @lli_pool: The pool with two entries pre-allocated. - * @lli_len: Number of LLI's in lli_pool - * @lli_tcount: Number of LLIs processed in the transfer. When equals lli_len - * then this transfer job is done. + * @lli_len: Number of llis of current descriptor. + * @lli_count: Number of transfered llis. + * @lli_tx_len: Max number of LLIs per transfer, there can be + * many transfer for one descriptor. * @txd: DMA engine struct. Used for among other things for communication * during a transfer. * @node: List entry. @@ -100,8 +103,9 @@ struct d40_desc { struct d40_log_lli_bidir lli_log; struct d40_lli_pool lli_pool; - u32 lli_len; - u32 lli_tcount; + int lli_len; + int lli_count; + u32 lli_tx_len; struct dma_async_tx_descriptor txd; struct list_head node; @@ -113,18 +117,20 @@ struct d40_desc { /** * struct d40_lcla_pool - LCLA pool settings and data. * - * @base: The virtual address of LCLA. - * @phy: Physical base address of LCLA. - * @base_size: size of lcla. + * @base: The virtual address of LCLA. 18 bit aligned. + * @base_unaligned: The orignal kmalloc pointer, if kmalloc is used. + * This pointer is only there for clean-up on error. + * @pages: The number of pages needed for all physical channels. + * Only used later for clean-up on error * @lock: Lock to protect the content in this struct. - * @alloc_map: Mapping between physical channel and LCLA entries. + * @alloc_map: Bitmap mapping between physical channel and LCLA entries. * @num_blocks: The number of entries of alloc_map. Equals to the * number of physical channels. */ struct d40_lcla_pool { void *base; - dma_addr_t phy; - resource_size_t base_size; + void *base_unaligned; + int pages; spinlock_t lock; u32 *alloc_map; int num_blocks; @@ -163,15 +169,14 @@ struct d40_base; * @pending_tx: The number of pending transfers. Used between interrupt handler * and tasklet. * @busy: Set to true when transfer is ongoing on this channel. - * @phy_chan: Pointer to physical channel which this instance runs on. + * @phy_chan: Pointer to physical channel which this instance runs on. If this + * point is NULL, then the channel is not allocated. * @chan: DMA engine handle. * @tasklet: Tasklet that gets scheduled from interrupt context to complete a * transfer and call client callback. * @client: Cliented owned descriptor list. * @active: Active descriptor. * @queue: Queued jobs. 
- * @free: List of free descripts, ready to be reused. - * @free_len: Number of descriptors in the free list. * @dma_cfg: The client configuration of this dma channel. * @base: Pointer to the device instance struct. * @src_def_cfg: Default cfg register setting for src. @@ -195,8 +200,6 @@ struct d40_chan { struct list_head client; struct list_head active; struct list_head queue; - struct list_head free; - int free_len; struct stedma40_chan_cfg dma_cfg; struct d40_base *base; /* Default register configurations */ @@ -205,6 +208,9 @@ struct d40_chan { struct d40_def_lcsp log_def; struct d40_lcla_elem lcla; struct d40_log_lli_full *lcpa; + /* Runtime reconfiguration */ + dma_addr_t runtime_addr; + enum dma_data_direction runtime_direction; }; /** @@ -215,6 +221,7 @@ struct d40_chan { * the same physical register. * @dev: The device structure. * @virtbase: The virtual base address of the DMA's register. + * @rev: silicon revision detected. * @clk: Pointer to the DMA clock structure. * @phy_start: Physical memory start of the DMA registers. * @phy_size: Size of the DMA register map. @@ -240,12 +247,14 @@ struct d40_chan { * @lcpa_base: The virtual mapped address of LCPA. * @phy_lcpa: The physical address of the LCPA. * @lcpa_size: The size of the LCPA area. + * @desc_slab: cache for descriptors. */ struct d40_base { spinlock_t interrupt_lock; spinlock_t execmd_lock; struct device *dev; void __iomem *virtbase; + u8 rev:4; struct clk *clk; phys_addr_t phy_start; resource_size_t phy_size; @@ -266,6 +275,7 @@ struct d40_base { void *lcpa_base; dma_addr_t phy_lcpa; resource_size_t lcpa_size; + struct kmem_cache *desc_slab; }; /** @@ -365,11 +375,6 @@ static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c, return cookie; } -static void d40_desc_reset(struct d40_desc *d40d) -{ - d40d->lli_tcount = 0; -} - static void d40_desc_remove(struct d40_desc *d40d) { list_del(&d40d->node); @@ -377,7 +382,6 @@ static void d40_desc_remove(struct d40_desc *d40d) static struct d40_desc *d40_desc_get(struct d40_chan *d40c) { - struct d40_desc *desc; struct d40_desc *d; struct d40_desc *_d; @@ -386,36 +390,21 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c) if (async_tx_test_ack(&d->txd)) { d40_pool_lli_free(d); d40_desc_remove(d); - desc = d; - goto out; + break; } - } - - if (list_empty(&d40c->free)) { - /* Alloc new desc because we're out of used ones */ - desc = kzalloc(sizeof(struct d40_desc), GFP_NOWAIT); - if (desc == NULL) - goto out; - INIT_LIST_HEAD(&desc->node); } else { - /* Reuse an old desc. 
*/ - desc = list_first_entry(&d40c->free, - struct d40_desc, - node); - list_del(&desc->node); - d40c->free_len--; + d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT); + if (d != NULL) { + memset(d, 0, sizeof(struct d40_desc)); + INIT_LIST_HEAD(&d->node); + } } -out: - return desc; + return d; } static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) { - if (d40c->free_len < D40_DESC_CACHE_SIZE) { - list_add_tail(&d40d->node, &d40c->free); - d40c->free_len++; - } else - kfree(d40d); + kmem_cache_free(d40c->base->desc_slab, d40d); } static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc) @@ -456,37 +445,41 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c) /* Support functions for logical channels */ -static int d40_lcla_id_get(struct d40_chan *d40c, - struct d40_lcla_pool *pool) +static int d40_lcla_id_get(struct d40_chan *d40c) { int src_id = 0; int dst_id = 0; struct d40_log_lli *lcla_lidx_base = - pool->base + d40c->phy_chan->num * 1024; + d40c->base->lcla_pool.base + d40c->phy_chan->num * 1024; int i; int lli_per_log = d40c->base->plat_data->llis_per_log; + unsigned long flags; if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0) return 0; - if (pool->num_blocks > 32) + if (d40c->base->lcla_pool.num_blocks > 32) return -EINVAL; - spin_lock(&pool->lock); + spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); - for (i = 0; i < pool->num_blocks; i++) { - if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) { - pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i); + for (i = 0; i < d40c->base->lcla_pool.num_blocks; i++) { + if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] & + (0x1 << i))) { + d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |= + (0x1 << i); break; } } src_id = i; - if (src_id >= pool->num_blocks) + if (src_id >= d40c->base->lcla_pool.num_blocks) goto err; - for (; i < pool->num_blocks; i++) { - if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) { - pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i); + for (; i < d40c->base->lcla_pool.num_blocks; i++) { + if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] & + (0x1 << i))) { + d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |= + (0x1 << i); break; } } @@ -500,28 +493,13 @@ static int d40_lcla_id_get(struct d40_chan *d40c, d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1; d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1; - - spin_unlock(&pool->lock); + spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); return 0; err: - spin_unlock(&pool->lock); + spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); return -EINVAL; } -static void d40_lcla_id_put(struct d40_chan *d40c, - struct d40_lcla_pool *pool, - int id) -{ - if (id < 0) - return; - - d40c->lcla.src_id = -1; - d40c->lcla.dst_id = -1; - - spin_lock(&pool->lock); - pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id)); - spin_unlock(&pool->lock); -} static int d40_channel_execute_command(struct d40_chan *d40c, enum d40_command command) @@ -530,6 +508,7 @@ static int d40_channel_execute_command(struct d40_chan *d40c, void __iomem *active_reg; int ret = 0; unsigned long flags; + u32 wmask; spin_lock_irqsave(&d40c->base->execmd_lock, flags); @@ -547,7 +526,9 @@ static int d40_channel_execute_command(struct d40_chan *d40c, goto done; } - writel(command << D40_CHAN_POS(d40c->phy_chan->num), active_reg); + wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num)); + writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)), + active_reg); if 
(command == D40_DMA_SUSPEND_REQ) { @@ -586,8 +567,7 @@ done: static void d40_term_all(struct d40_chan *d40c) { struct d40_desc *d40d; - struct d40_desc *d; - struct d40_desc *_d; + unsigned long flags; /* Release active descriptors */ while ((d40d = d40_first_active_get(d40c))) { @@ -605,19 +585,17 @@ static void d40_term_all(struct d40_chan *d40c) d40_desc_free(d40c, d40d); } - /* Release client owned descriptors */ - if (!list_empty(&d40c->client)) - list_for_each_entry_safe(d, _d, &d40c->client, node) { - d40_pool_lli_free(d); - d40_desc_remove(d); - /* Return desc to free-list */ - d40_desc_free(d40c, d40d); - } + spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); + + d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &= + (~(0x1 << d40c->lcla.dst_id)); + d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &= + (~(0x1 << d40c->lcla.src_id)); + + d40c->lcla.src_id = -1; + d40c->lcla.dst_id = -1; - d40_lcla_id_put(d40c, &d40c->base->lcla_pool, - d40c->lcla.src_id); - d40_lcla_id_put(d40c, &d40c->base->lcla_pool, - d40c->lcla.dst_id); + spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); d40c->pending_tx = 0; d40c->busy = false; @@ -628,6 +606,7 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) u32 val; unsigned long flags; + /* Notice, that disable requires the physical channel to be stopped */ if (do_enable) val = D40_ACTIVATE_EVENTLINE; else @@ -732,31 +711,34 @@ static int d40_config_write(struct d40_chan *d40c) static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) { - if (d40d->lli_phy.dst && d40d->lli_phy.src) { d40_phy_lli_write(d40c->base->virtbase, d40c->phy_chan->num, d40d->lli_phy.dst, d40d->lli_phy.src); - d40d->lli_tcount = d40d->lli_len; } else if (d40d->lli_log.dst && d40d->lli_log.src) { - u32 lli_len; struct d40_log_lli *src = d40d->lli_log.src; struct d40_log_lli *dst = d40d->lli_log.dst; - - src += d40d->lli_tcount; - dst += d40d->lli_tcount; - - if (d40d->lli_len <= d40c->base->plat_data->llis_per_log) - lli_len = d40d->lli_len; - else - lli_len = d40c->base->plat_data->llis_per_log; - d40d->lli_tcount += lli_len; - d40_log_lli_write(d40c->lcpa, d40c->lcla.src, - d40c->lcla.dst, - dst, src, - d40c->base->plat_data->llis_per_log); + int s; + + src += d40d->lli_count; + dst += d40d->lli_count; + s = d40_log_lli_write(d40c->lcpa, + d40c->lcla.src, d40c->lcla.dst, + dst, src, + d40c->base->plat_data->llis_per_log); + + /* If s equals to zero, the job is not linked */ + if (s > 0) { + (void) dma_map_single(d40c->base->dev, d40c->lcla.src, + s * sizeof(struct d40_log_lli), + DMA_TO_DEVICE); + (void) dma_map_single(d40c->base->dev, d40c->lcla.dst, + s * sizeof(struct d40_log_lli), + DMA_TO_DEVICE); + } } + d40d->lli_count += d40d->lli_tx_len; } static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) @@ -780,18 +762,21 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) static int d40_start(struct d40_chan *d40c) { - int err; + if (d40c->base->rev == 0) { + int err; - if (d40c->log_num != D40_PHY_CHAN) { - err = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); - if (err) - return err; - d40_config_set_event(d40c, true); + if (d40c->log_num != D40_PHY_CHAN) { + err = d40_channel_execute_command(d40c, + D40_DMA_SUSPEND_REQ); + if (err) + return err; + } } - err = d40_channel_execute_command(d40c, D40_DMA_RUN); + if (d40c->log_num != D40_PHY_CHAN) + d40_config_set_event(d40c, true); - return err; + return d40_channel_execute_command(d40c, D40_DMA_RUN); } static struct d40_desc 
*d40_queue_start(struct d40_chan *d40c) @@ -838,7 +823,7 @@ static void dma_tc_handle(struct d40_chan *d40c) if (d40d == NULL) return; - if (d40d->lli_tcount < d40d->lli_len) { + if (d40d->lli_count < d40d->lli_len) { d40_desc_load(d40c, d40d); /* Start dma job */ @@ -891,7 +876,6 @@ static void dma_tasklet(unsigned long data) /* Return desc to free-list */ d40_desc_free(d40c, d40d_fin); } else { - d40_desc_reset(d40d_fin); if (!d40d_fin->is_in_client_list) { d40_desc_remove(d40d_fin); list_add_tail(&d40d_fin->node, &d40c->client); @@ -975,7 +959,8 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data) if (!il[row].is_error) dma_tc_handle(d40c); else - dev_err(base->dev, "[%s] IRQ chan: %ld offset %d idx %d\n", + dev_err(base->dev, + "[%s] IRQ chan: %ld offset %d idx %d\n", __func__, chan, il[row].offset, idx); spin_unlock(&d40c->lock); @@ -1134,7 +1119,8 @@ static int d40_allocate_channel(struct d40_chan *d40c) int j; int log_num; bool is_src; - bool is_log = (d40c->dma_cfg.channel_type & STEDMA40_CHANNEL_IN_OPER_MODE) + bool is_log = (d40c->dma_cfg.channel_type & + STEDMA40_CHANNEL_IN_OPER_MODE) == STEDMA40_CHANNEL_IN_LOG_MODE; @@ -1169,8 +1155,10 @@ static int d40_allocate_channel(struct d40_chan *d40c) for (j = 0; j < d40c->base->num_phy_chans; j += 8) { int phy_num = j + event_group * 2; for (i = phy_num; i < phy_num + 2; i++) { - if (d40_alloc_mask_set(&phys[i], is_src, - 0, is_log)) + if (d40_alloc_mask_set(&phys[i], + is_src, + 0, + is_log)) goto found_phy; } } @@ -1221,30 +1209,6 @@ out: } -static int d40_config_chan(struct d40_chan *d40c, - struct stedma40_chan_cfg *info) -{ - - /* Fill in basic CFG register values */ - d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, - &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN); - - if (d40c->log_num != D40_PHY_CHAN) { - d40_log_cfg(&d40c->dma_cfg, - &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); - - if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) - d40c->lcpa = d40c->base->lcpa_base + - d40c->dma_cfg.src_dev_type * 32; - else - d40c->lcpa = d40c->base->lcpa_base + - d40c->dma_cfg.dst_dev_type * 32 + 16; - } - - /* Write channel configuration to the DMA */ - return d40_config_write(d40c); -} - static int d40_config_memcpy(struct d40_chan *d40c) { dma_cap_mask_t cap = d40c->chan.device->cap_mask; @@ -1272,13 +1236,25 @@ static int d40_free_dma(struct d40_chan *d40c) { int res = 0; - u32 event, dir; + u32 event; struct d40_phy_res *phy = d40c->phy_chan; bool is_src; + struct d40_desc *d; + struct d40_desc *_d; + /* Terminate all queued and active transfers */ d40_term_all(d40c); + /* Release client owned descriptors */ + if (!list_empty(&d40c->client)) + list_for_each_entry_safe(d, _d, &d40c->client, node) { + d40_pool_lli_free(d); + d40_desc_remove(d); + /* Return desc to free-list */ + d40_desc_free(d40c, d); + } + if (phy == NULL) { dev_err(&d40c->chan.dev->device, "[%s] phy == null\n", __func__); @@ -1292,22 +1268,12 @@ static int d40_free_dma(struct d40_chan *d40c) return -EINVAL; } - - res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); - if (res) { - dev_err(&d40c->chan.dev->device, "[%s] suspend\n", - __func__); - return res; - } - if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); - dir = D40_CHAN_REG_SDLNK; is_src = false; } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); - dir = D40_CHAN_REG_SSLNK; is_src = true; } else { 
dev_err(&d40c->chan.dev->device, @@ -1315,16 +1281,17 @@ static int d40_free_dma(struct d40_chan *d40c) return -EINVAL; } + res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); + if (res) { + dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n", + __func__); + return res; + } + if (d40c->log_num != D40_PHY_CHAN) { - /* - * Release logical channel, deactivate the event line during - * the time physical res is suspended. - */ - writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) & - D40_EVENTLINE_MASK(event), - d40c->base->virtbase + D40_DREG_PCBASE + - phy->num * D40_DREG_PCDELTA + dir); + /* Release logical channel, deactivate the event line */ + d40_config_set_event(d40c, false); d40c->base->lookup_log_chans[d40c->log_num] = NULL; /* @@ -1345,8 +1312,9 @@ static int d40_free_dma(struct d40_chan *d40c) } return 0; } - } else - d40_alloc_mask_free(phy, is_src, 0); + } else { + (void) d40_alloc_mask_free(phy, is_src, 0); + } /* Release physical channel */ res = d40_channel_execute_command(d40c, D40_DMA_STOP); @@ -1361,8 +1329,6 @@ static int d40_free_dma(struct d40_chan *d40c) d40c->base->lookup_phy_chans[phy->num] = NULL; return 0; - - } static int d40_pause(struct dma_chan *chan) @@ -1370,7 +1336,6 @@ static int d40_pause(struct dma_chan *chan) struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); int res; - unsigned long flags; spin_lock_irqsave(&d40c->lock, flags); @@ -1397,7 +1362,6 @@ static bool d40_is_paused(struct d40_chan *d40c) void __iomem *active_reg; u32 status; u32 event; - int res; spin_lock_irqsave(&d40c->lock, flags); @@ -1416,10 +1380,6 @@ static bool d40_is_paused(struct d40_chan *d40c) goto _exit; } - res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); - if (res != 0) - goto _exit; - if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); @@ -1436,12 +1396,6 @@ static bool d40_is_paused(struct d40_chan *d40c) if (status != D40_DMA_RUN) is_paused = true; - - /* Resume the other logical channels if any */ - if (d40_chan_has_events(d40c)) - res = d40_channel_execute_command(d40c, - D40_DMA_RUN); - _exit: spin_unlock_irqrestore(&d40c->lock, flags); return is_paused; @@ -1468,13 +1422,14 @@ static u32 d40_residue(struct d40_chan *d40c) u32 num_elt; if (d40c->log_num != D40_PHY_CHAN) - num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) + num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) >> D40_MEM_LCSP2_ECNT_POS; else num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE + d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT) & - D40_SREG_ELEM_PHY_ECNT_MASK) >> D40_SREG_ELEM_PHY_ECNT_POS; + D40_SREG_ELEM_PHY_ECNT_MASK) >> + D40_SREG_ELEM_PHY_ECNT_POS; return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); } @@ -1487,20 +1442,21 @@ static int d40_resume(struct dma_chan *chan) spin_lock_irqsave(&d40c->lock, flags); - if (d40c->log_num != D40_PHY_CHAN) { - res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); - if (res) - goto out; + if (d40c->base->rev == 0) + if (d40c->log_num != D40_PHY_CHAN) { + res = d40_channel_execute_command(d40c, + D40_DMA_SUSPEND_REQ); + goto no_suspend; + } - /* If bytes left to transfer or linked tx resume job */ - if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { + /* If bytes left to transfer or linked tx resume job */ + if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { + if (d40c->log_num != D40_PHY_CHAN) d40_config_set_event(d40c, true); - res = 
d40_channel_execute_command(d40c, D40_DMA_RUN); - } - } else if (d40_residue(d40c) || d40_tx_is_linked(d40c)) res = d40_channel_execute_command(d40c, D40_DMA_RUN); + } -out: +no_suspend: spin_unlock_irqrestore(&d40c->lock, flags); return res; } @@ -1534,8 +1490,10 @@ int stedma40_set_psize(struct dma_chan *chan, if (d40c->log_num != D40_PHY_CHAN) { d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; - d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS; - d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS; + d40c->log_def.lcsp1 |= src_psize << + D40_MEM_LCSP1_SCFG_PSIZE_POS; + d40c->log_def.lcsp3 |= dst_psize << + D40_MEM_LCSP1_SCFG_PSIZE_POS; goto out; } @@ -1566,37 +1524,42 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, struct scatterlist *sgl_dst, struct scatterlist *sgl_src, unsigned int sgl_len, - unsigned long flags) + unsigned long dma_flags) { int res; struct d40_desc *d40d; struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); - unsigned long flg; - int lli_max = d40c->base->plat_data->llis_per_log; + unsigned long flags; + if (d40c->phy_chan == NULL) { + dev_err(&d40c->chan.dev->device, + "[%s] Unallocated channel.\n", __func__); + return ERR_PTR(-EINVAL); + } - spin_lock_irqsave(&d40c->lock, flg); + spin_lock_irqsave(&d40c->lock, flags); d40d = d40_desc_get(d40c); if (d40d == NULL) goto err; - memset(d40d, 0, sizeof(struct d40_desc)); d40d->lli_len = sgl_len; - - d40d->txd.flags = flags; + d40d->lli_tx_len = d40d->lli_len; + d40d->txd.flags = dma_flags; if (d40c->log_num != D40_PHY_CHAN) { + if (d40d->lli_len > d40c->base->plat_data->llis_per_log) + d40d->lli_tx_len = d40c->base->plat_data->llis_per_log; + if (sgl_len > 1) /* * Check if there is space available in lcla. If not, * split list into 1-length and run only in lcpa * space. 
*/ - if (d40_lcla_id_get(d40c, - &d40c->base->lcla_pool) != 0) - lli_max = 1; + if (d40_lcla_id_get(d40c) != 0) + d40d->lli_tx_len = 1; if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) { dev_err(&d40c->chan.dev->device, @@ -1610,7 +1573,8 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, d40d->lli_log.src, d40c->log_def.lcsp1, d40c->dma_cfg.src_info.data_width, - flags & DMA_PREP_INTERRUPT, lli_max, + dma_flags & DMA_PREP_INTERRUPT, + d40d->lli_tx_len, d40c->base->plat_data->llis_per_log); (void) d40_log_sg_to_lli(d40c->lcla.dst_id, @@ -1619,7 +1583,8 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, d40d->lli_log.dst, d40c->log_def.lcsp3, d40c->dma_cfg.dst_info.data_width, - flags & DMA_PREP_INTERRUPT, lli_max, + dma_flags & DMA_PREP_INTERRUPT, + d40d->lli_tx_len, d40c->base->plat_data->llis_per_log); @@ -1664,11 +1629,11 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, d40d->txd.tx_submit = d40_tx_submit; - spin_unlock_irqrestore(&d40c->lock, flg); + spin_unlock_irqrestore(&d40c->lock, flags); return &d40d->txd; err: - spin_unlock_irqrestore(&d40c->lock, flg); + spin_unlock_irqrestore(&d40c->lock, flags); return NULL; } EXPORT_SYMBOL(stedma40_memcpy_sg); @@ -1698,46 +1663,66 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) unsigned long flags; struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); - + bool is_free_phy; spin_lock_irqsave(&d40c->lock, flags); d40c->completed = chan->cookie = 1; /* * If no dma configuration is set (channel_type == 0) - * use default configuration + * use default configuration (memcpy) */ if (d40c->dma_cfg.channel_type == 0) { err = d40_config_memcpy(d40c); - if (err) - goto err_alloc; + if (err) { + dev_err(&d40c->chan.dev->device, + "[%s] Failed to configure memcpy channel\n", + __func__); + goto fail; + } } + is_free_phy = (d40c->phy_chan == NULL); err = d40_allocate_channel(d40c); if (err) { dev_err(&d40c->chan.dev->device, "[%s] Failed to allocate channel\n", __func__); - goto err_alloc; + goto fail; } - err = d40_config_chan(d40c, &d40c->dma_cfg); - if (err) { - dev_err(&d40c->chan.dev->device, - "[%s] Failed to configure channel\n", - __func__); - goto err_config; - } + /* Fill in basic CFG register values */ + d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, + &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN); - spin_unlock_irqrestore(&d40c->lock, flags); - return 0; + if (d40c->log_num != D40_PHY_CHAN) { + d40_log_cfg(&d40c->dma_cfg, + &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); - err_config: - (void) d40_free_dma(d40c); - err_alloc: + if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) + d40c->lcpa = d40c->base->lcpa_base + + d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE; + else + d40c->lcpa = d40c->base->lcpa_base + + d40c->dma_cfg.dst_dev_type * + D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; + } + + /* + * Only write channel configuration to the DMA if the physical + * resource is free. In case of multiple logical channels + * on the same physical resource, only the first write is necessary. 
+ */ + if (is_free_phy) { + err = d40_config_write(d40c); + if (err) { + dev_err(&d40c->chan.dev->device, + "[%s] Failed to configure channel\n", + __func__); + } + } +fail: spin_unlock_irqrestore(&d40c->lock, flags); - dev_err(&d40c->chan.dev->device, - "[%s] Channel allocation failed\n", __func__); - return -EINVAL; + return err; } static void d40_free_chan_resources(struct dma_chan *chan) @@ -1747,6 +1732,13 @@ static void d40_free_chan_resources(struct dma_chan *chan) int err; unsigned long flags; + if (d40c->phy_chan == NULL) { + dev_err(&d40c->chan.dev->device, + "[%s] Cannot free unallocated channel\n", __func__); + return; + } + + spin_lock_irqsave(&d40c->lock, flags); err = d40_free_dma(d40c); @@ -1761,15 +1753,21 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, size_t size, - unsigned long flags) + unsigned long dma_flags) { struct d40_desc *d40d; struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); - unsigned long flg; + unsigned long flags; int err = 0; - spin_lock_irqsave(&d40c->lock, flg); + if (d40c->phy_chan == NULL) { + dev_err(&d40c->chan.dev->device, + "[%s] Channel is not allocated.\n", __func__); + return ERR_PTR(-EINVAL); + } + + spin_lock_irqsave(&d40c->lock, flags); d40d = d40_desc_get(d40c); if (d40d == NULL) { @@ -1778,9 +1776,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, goto err; } - memset(d40d, 0, sizeof(struct d40_desc)); - - d40d->txd.flags = flags; + d40d->txd.flags = dma_flags; dma_async_tx_descriptor_init(&d40d->txd, chan); @@ -1794,6 +1790,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, goto err; } d40d->lli_len = 1; + d40d->lli_tx_len = 1; d40_log_fill_lli(d40d->lli_log.src, src, @@ -1801,7 +1798,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, 0, d40c->log_def.lcsp1, d40c->dma_cfg.src_info.data_width, - true, true); + false, true); d40_log_fill_lli(d40d->lli_log.dst, dst, @@ -1848,7 +1845,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, d40d->lli_pool.size, DMA_TO_DEVICE); } - spin_unlock_irqrestore(&d40c->lock, flg); + spin_unlock_irqrestore(&d40c->lock, flags); return &d40d->txd; err_fill_lli: @@ -1856,7 +1853,7 @@ err_fill_lli: "[%s] Failed filling in PHY LLI\n", __func__); d40_pool_lli_free(d40d); err: - spin_unlock_irqrestore(&d40c->lock, flg); + spin_unlock_irqrestore(&d40c->lock, flags); return NULL; } @@ -1865,11 +1862,10 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d, struct scatterlist *sgl, unsigned int sg_len, enum dma_data_direction direction, - unsigned long flags) + unsigned long dma_flags) { dma_addr_t dev_addr = 0; int total_size; - int lli_max = d40c->base->plat_data->llis_per_log; if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) { dev_err(&d40c->chan.dev->device, @@ -1878,7 +1874,10 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d, } d40d->lli_len = sg_len; - d40d->lli_tcount = 0; + if (d40d->lli_len <= d40c->base->plat_data->llis_per_log) + d40d->lli_tx_len = d40d->lli_len; + else + d40d->lli_tx_len = d40c->base->plat_data->llis_per_log; if (sg_len > 1) /* @@ -1886,35 +1885,34 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d, * If not, split list into 1-length and run only * in lcpa space. 
*/ - if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0) - lli_max = 1; + if (d40_lcla_id_get(d40c) != 0) + d40d->lli_tx_len = 1; - if (direction == DMA_FROM_DEVICE) { - dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; - total_size = d40_log_sg_to_dev(&d40c->lcla, - sgl, sg_len, - &d40d->lli_log, - &d40c->log_def, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width, - direction, - flags & DMA_PREP_INTERRUPT, - dev_addr, lli_max, - d40c->base->plat_data->llis_per_log); - } else if (direction == DMA_TO_DEVICE) { - dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; - total_size = d40_log_sg_to_dev(&d40c->lcla, - sgl, sg_len, - &d40d->lli_log, - &d40c->log_def, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width, - direction, - flags & DMA_PREP_INTERRUPT, - dev_addr, lli_max, - d40c->base->plat_data->llis_per_log); - } else + if (direction == DMA_FROM_DEVICE) + if (d40c->runtime_addr) + dev_addr = d40c->runtime_addr; + else + dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; + else if (direction == DMA_TO_DEVICE) + if (d40c->runtime_addr) + dev_addr = d40c->runtime_addr; + else + dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; + + else return -EINVAL; + + total_size = d40_log_sg_to_dev(&d40c->lcla, + sgl, sg_len, + &d40d->lli_log, + &d40c->log_def, + d40c->dma_cfg.src_info.data_width, + d40c->dma_cfg.dst_info.data_width, + direction, + dma_flags & DMA_PREP_INTERRUPT, + dev_addr, d40d->lli_tx_len, + d40c->base->plat_data->llis_per_log); + if (total_size < 0) return -EINVAL; @@ -1926,7 +1924,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d, struct scatterlist *sgl, unsigned int sgl_len, enum dma_data_direction direction, - unsigned long flags) + unsigned long dma_flags) { dma_addr_t src_dev_addr; dma_addr_t dst_dev_addr; @@ -1939,13 +1937,19 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d, } d40d->lli_len = sgl_len; - d40d->lli_tcount = 0; + d40d->lli_tx_len = sgl_len; if (direction == DMA_FROM_DEVICE) { dst_dev_addr = 0; - src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; + if (d40c->runtime_addr) + src_dev_addr = d40c->runtime_addr; + else + src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; } else if (direction == DMA_TO_DEVICE) { - dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; + if (d40c->runtime_addr) + dst_dev_addr = d40c->runtime_addr; + else + dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; src_dev_addr = 0; } else return -EINVAL; @@ -1983,34 +1987,38 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_data_direction direction, - unsigned long flags) + unsigned long dma_flags) { struct d40_desc *d40d; struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); - unsigned long flg; + unsigned long flags; int err; + if (d40c->phy_chan == NULL) { + dev_err(&d40c->chan.dev->device, + "[%s] Cannot prepare unallocated channel\n", __func__); + return ERR_PTR(-EINVAL); + } + if (d40c->dma_cfg.pre_transfer) d40c->dma_cfg.pre_transfer(chan, d40c->dma_cfg.pre_transfer_data, sg_dma_len(sgl)); - spin_lock_irqsave(&d40c->lock, flg); + spin_lock_irqsave(&d40c->lock, flags); d40d = d40_desc_get(d40c); - spin_unlock_irqrestore(&d40c->lock, flg); + spin_unlock_irqrestore(&d40c->lock, flags); if (d40d == NULL) return NULL; - memset(d40d, 0, sizeof(struct d40_desc)); - 
if (d40c->log_num != D40_PHY_CHAN) err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len, - direction, flags); + direction, dma_flags); else err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len, - direction, flags); + direction, dma_flags); if (err) { dev_err(&d40c->chan.dev->device, "[%s] Failed to prepare %s slave sg job: %d\n", @@ -2019,7 +2027,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, return NULL; } - d40d->txd.flags = flags; + d40d->txd.flags = dma_flags; dma_async_tx_descriptor_init(&d40d->txd, chan); @@ -2037,6 +2045,13 @@ static enum dma_status d40_tx_status(struct dma_chan *chan, dma_cookie_t last_complete; int ret; + if (d40c->phy_chan == NULL) { + dev_err(&d40c->chan.dev->device, + "[%s] Cannot read status of unallocated channel\n", + __func__); + return -EINVAL; + } + last_complete = d40c->completed; last_used = chan->cookie; @@ -2056,6 +2071,12 @@ static void d40_issue_pending(struct dma_chan *chan) struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); unsigned long flags; + if (d40c->phy_chan == NULL) { + dev_err(&d40c->chan.dev->device, + "[%s] Channel is not allocated!\n", __func__); + return; + } + spin_lock_irqsave(&d40c->lock, flags); /* Busy means that pending jobs are already being processed */ @@ -2065,12 +2086,129 @@ static void d40_issue_pending(struct dma_chan *chan) spin_unlock_irqrestore(&d40c->lock, flags); } +/* Runtime reconfiguration extension */ +static void d40_set_runtime_config(struct dma_chan *chan, + struct dma_slave_config *config) +{ + struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); + struct stedma40_chan_cfg *cfg = &d40c->dma_cfg; + enum dma_slave_buswidth config_addr_width; + dma_addr_t config_addr; + u32 config_maxburst; + enum stedma40_periph_data_width addr_width; + int psize; + + if (config->direction == DMA_FROM_DEVICE) { + dma_addr_t dev_addr_rx = + d40c->base->plat_data->dev_rx[cfg->src_dev_type]; + + config_addr = config->src_addr; + if (dev_addr_rx) + dev_dbg(d40c->base->dev, + "channel has a pre-wired RX address %08x " + "overriding with %08x\n", + dev_addr_rx, config_addr); + if (cfg->dir != STEDMA40_PERIPH_TO_MEM) + dev_dbg(d40c->base->dev, + "channel was not configured for peripheral " + "to memory transfer (%d) overriding\n", + cfg->dir); + cfg->dir = STEDMA40_PERIPH_TO_MEM; + + config_addr_width = config->src_addr_width; + config_maxburst = config->src_maxburst; + + } else if (config->direction == DMA_TO_DEVICE) { + dma_addr_t dev_addr_tx = + d40c->base->plat_data->dev_tx[cfg->dst_dev_type]; + + config_addr = config->dst_addr; + if (dev_addr_tx) + dev_dbg(d40c->base->dev, + "channel has a pre-wired TX address %08x " + "overriding with %08x\n", + dev_addr_tx, config_addr); + if (cfg->dir != STEDMA40_MEM_TO_PERIPH) + dev_dbg(d40c->base->dev, + "channel was not configured for memory " + "to peripheral transfer (%d) overriding\n", + cfg->dir); + cfg->dir = STEDMA40_MEM_TO_PERIPH; + + config_addr_width = config->dst_addr_width; + config_maxburst = config->dst_maxburst; + + } else { + dev_err(d40c->base->dev, + "unrecognized channel direction %d\n", + config->direction); + return; + } + + switch (config_addr_width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + addr_width = STEDMA40_BYTE_WIDTH; + break; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + addr_width = STEDMA40_HALFWORD_WIDTH; + break; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + addr_width = STEDMA40_WORD_WIDTH; + break; + case DMA_SLAVE_BUSWIDTH_8_BYTES: + addr_width = STEDMA40_DOUBLEWORD_WIDTH; + break; + default: + 
dev_err(d40c->base->dev, + "illegal peripheral address width " + "requested (%d)\n", + config->src_addr_width); + return; + } + + if (config_maxburst >= 16) + psize = STEDMA40_PSIZE_LOG_16; + else if (config_maxburst >= 8) + psize = STEDMA40_PSIZE_LOG_8; + else if (config_maxburst >= 4) + psize = STEDMA40_PSIZE_LOG_4; + else + psize = STEDMA40_PSIZE_LOG_1; + + /* Set up all the endpoint configs */ + cfg->src_info.data_width = addr_width; + cfg->src_info.psize = psize; + cfg->src_info.endianess = STEDMA40_LITTLE_ENDIAN; + cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; + cfg->dst_info.data_width = addr_width; + cfg->dst_info.psize = psize; + cfg->dst_info.endianess = STEDMA40_LITTLE_ENDIAN; + cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; + + /* These settings will take precedence later */ + d40c->runtime_addr = config_addr; + d40c->runtime_direction = config->direction; + dev_dbg(d40c->base->dev, + "configured channel %s for %s, data width %d, " + "maxburst %d bytes, LE, no flow control\n", + dma_chan_name(chan), + (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", + config_addr_width, + config_maxburst); +} + static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) { unsigned long flags; struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); + if (d40c->phy_chan == NULL) { + dev_err(&d40c->chan.dev->device, + "[%s] Channel is not allocated!\n", __func__); + return -EINVAL; + } + switch (cmd) { case DMA_TERMINATE_ALL: spin_lock_irqsave(&d40c->lock, flags); @@ -2081,6 +2219,12 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, return d40_pause(chan); case DMA_RESUME: return d40_resume(chan); + case DMA_SLAVE_CONFIG: + d40_set_runtime_config(chan, + (struct dma_slave_config *) arg); + return 0; + default: + break; } /* Other commands are unimplemented */ @@ -2111,13 +2255,10 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, d40c->log_num = D40_PHY_CHAN; - INIT_LIST_HEAD(&d40c->free); INIT_LIST_HEAD(&d40c->active); INIT_LIST_HEAD(&d40c->queue); INIT_LIST_HEAD(&d40c->client); - d40c->free_len = 0; - tasklet_init(&d40c->tasklet, dma_tasklet, (unsigned long) d40c); @@ -2243,6 +2384,14 @@ static int __init d40_phy_res_init(struct d40_base *base) } spin_lock_init(&base->phy_res[i].lock); } + + /* Mark disabled channels as occupied */ + for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) { + base->phy_res[i].allocated_src = D40_ALLOC_PHY; + base->phy_res[i].allocated_dst = D40_ALLOC_PHY; + num_phy_chans_avail--; + } + dev_info(base->dev, "%d of %d physical DMA channels available\n", num_phy_chans_avail, base->num_phy_chans); @@ -2291,6 +2440,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) int num_log_chans = 0; int num_phy_chans; int i; + u32 val; clk = clk_get(&pdev->dev, NULL); @@ -2329,12 +2479,13 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) } } - i = readl(virtbase + D40_DREG_PERIPHID2); + /* Get silicon revision */ + val = readl(virtbase + D40_DREG_PERIPHID2); - if ((i & 0xf) != D40_PERIPHID2_DESIGNER) { + if ((val & 0xf) != D40_PERIPHID2_DESIGNER) { dev_err(&pdev->dev, "[%s] Unknown designer! 
Got %x wanted %x\n", - __func__, i & 0xf, D40_PERIPHID2_DESIGNER); + __func__, val & 0xf, D40_PERIPHID2_DESIGNER); goto failure; } @@ -2342,7 +2493,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", - (i >> 4) & 0xf, res->start); + (val >> 4) & 0xf, res->start); plat_data = pdev->dev.platform_data; @@ -2364,6 +2515,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) goto failure; } + base->rev = (val >> 4) & 0xf; base->clk = clk; base->num_phy_chans = num_phy_chans; base->num_log_chans = num_log_chans; @@ -2402,6 +2554,12 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) if (!base->lcla_pool.alloc_map) goto failure; + base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc), + 0, SLAB_HWCACHE_ALIGN, + NULL); + if (base->desc_slab == NULL) + goto failure; + return base; failure: @@ -2495,6 +2653,78 @@ static void __init d40_hw_init(struct d40_base *base) } +static int __init d40_lcla_allocate(struct d40_base *base) +{ + unsigned long *page_list; + int i, j; + int ret = 0; + + /* + * This is somewhat ugly. We need 8192 bytes that are 18 bit aligned, + * To full fill this hardware requirement without wasting 256 kb + * we allocate pages until we get an aligned one. + */ + page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS, + GFP_KERNEL); + + if (!page_list) { + ret = -ENOMEM; + goto failure; + } + + /* Calculating how many pages that are required */ + base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE; + + for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) { + page_list[i] = __get_free_pages(GFP_KERNEL, + base->lcla_pool.pages); + if (!page_list[i]) { + + dev_err(base->dev, + "[%s] Failed to allocate %d pages.\n", + __func__, base->lcla_pool.pages); + + for (j = 0; j < i; j++) + free_pages(page_list[j], base->lcla_pool.pages); + goto failure; + } + + if ((virt_to_phys((void *)page_list[i]) & + (LCLA_ALIGNMENT - 1)) == 0) + break; + } + + for (j = 0; j < i; j++) + free_pages(page_list[j], base->lcla_pool.pages); + + if (i < MAX_LCLA_ALLOC_ATTEMPTS) { + base->lcla_pool.base = (void *)page_list[i]; + } else { + /* After many attempts, no succees with finding the correct + * alignment try with allocating a big buffer */ + dev_warn(base->dev, + "[%s] Failed to get %d pages @ 18 bit align.\n", + __func__, base->lcla_pool.pages); + base->lcla_pool.base_unaligned = kmalloc(SZ_1K * + base->num_phy_chans + + LCLA_ALIGNMENT, + GFP_KERNEL); + if (!base->lcla_pool.base_unaligned) { + ret = -ENOMEM; + goto failure; + } + + base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned, + LCLA_ALIGNMENT); + } + + writel(virt_to_phys(base->lcla_pool.base), + base->virtbase + D40_DREG_LCLA); +failure: + kfree(page_list); + return ret; +} + static int __init d40_probe(struct platform_device *pdev) { int err; @@ -2554,41 +2784,11 @@ static int __init d40_probe(struct platform_device *pdev) __func__); goto failure; } - /* Get IO for logical channel link address */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla"); - if (!res) { - ret = -ENOENT; - dev_err(&pdev->dev, - "[%s] No \"lcla\" resource defined\n", - __func__); - goto failure; - } - base->lcla_pool.base_size = resource_size(res); - base->lcla_pool.phy = res->start; - - if (request_mem_region(res->start, resource_size(res), - D40_NAME " I/O lcla") == NULL) { - ret = -EBUSY; - 
dev_err(&pdev->dev, - "[%s] Failed to request LCLA region 0x%x-0x%x\n", - __func__, res->start, res->end); - goto failure; - } - val = readl(base->virtbase + D40_DREG_LCLA); - if (res->start != val && val != 0) { - dev_warn(&pdev->dev, - "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n", - __func__, val, res->start); - } else - writel(res->start, base->virtbase + D40_DREG_LCLA); - - base->lcla_pool.base = ioremap(res->start, resource_size(res)); - if (!base->lcla_pool.base) { - ret = -ENOMEM; - dev_err(&pdev->dev, - "[%s] Failed to ioremap LCLA 0x%x-0x%x\n", - __func__, res->start, res->end); + ret = d40_lcla_allocate(base); + if (ret) { + dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n", + __func__); goto failure; } @@ -2616,11 +2816,15 @@ static int __init d40_probe(struct platform_device *pdev) failure: if (base) { + if (base->desc_slab) + kmem_cache_destroy(base->desc_slab); if (base->virtbase) iounmap(base->virtbase); - if (base->lcla_pool.phy) - release_mem_region(base->lcla_pool.phy, - base->lcla_pool.base_size); + if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) + free_pages((unsigned long)base->lcla_pool.base, + base->lcla_pool.pages); + if (base->lcla_pool.base_unaligned) + kfree(base->lcla_pool.base_unaligned); if (base->phy_lcpa) release_mem_region(base->phy_lcpa, base->lcpa_size); |
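For context, a minimal client-side sketch of how the DMA_SLAVE_CONFIG path added by this patch (d40_set_runtime_config) would be exercised. This is not part of the patch: the peripheral FIFO address, bus width and burst size are assumed values, and the call goes through the generic device_control hook of this dmaengine generation rather than any driver-specific API.

	/*
	 * Hypothetical client code: reconfigure an already requested
	 * channel "chan" for memory-to-peripheral transfers at runtime.
	 * "uart_fifo_phys" is a placeholder for the device FIFO address.
	 */
	struct dma_slave_config cfg = {
		.direction      = DMA_TO_DEVICE,
		.dst_addr       = uart_fifo_phys,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.dst_maxburst   = 8,
	};

	chan->device->device_control(chan, DMA_SLAVE_CONFIG,
				     (unsigned long) &cfg);

The configured address ends up in d40c->runtime_addr and overrides the platform-data dev_rx/dev_tx lookup in the prep_slave_sg paths shown above; the maxburst value is mapped onto the STEDMA40_PSIZE_LOG_* settings by d40_set_runtime_config.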