| field | value | date |
|---|---|---|
| author | Arnd Bergmann <arnd@arndb.de> | 2012-03-04 21:09:11 +0000 |
| committer | Arnd Bergmann <arnd@arndb.de> | 2012-03-04 21:20:46 +0000 |
| commit | 709baa67c676a187a63c0f0f40efceb3fb1eef72 (patch) | |
| tree | 0a2b8bab5ac4f52069a1e50d20028ae44d8c256a /arch/arm/mach-tegra/dma.c | |
| parent | 281a9f78eaa5d0d78bd0e3acd181a24d3bad28e3 (diff) | |
| parent | 8c690fdf465be9d97229f6bb0e6346624d6753a9 (diff) | |
Merge tag 'tegra-soc2' of git://git.kernel.org/pub/scm/linux/kernel/git/olof/tegra into tegra/soc2
From: Olof Johansson <olof@lixom.net>
Tegra 30 SMP support
I did this as a separate topic branch because it depends on both the
soc and the soc-drivers branches, so it brings both of those in as a base.
This branch contains work to enable SMP support on Tegra30 and reworks
some of the SMP bringup for T20 as well.
It also contains a device tree patch that builds on top of the SMP/clock
changes in the rest of the branch, so it made more sense to apply it
here than deal with the merge conflicts back and forth.
* tag 'tegra-soc2' of git://git.kernel.org/pub/scm/linux/kernel/git/olof/tegra:
ARM: dt: Explicitly configure all serial ports on Tegra Cardhu
ARM: tegra: support for secondary cores on Tegra30
ARM: tegra: support for Tegra30 CPU powerdomains
ARM: tegra: add support for Tegra30 powerdomains
ARM: tegra: export tegra_powergate_is_powered()
ARM: tegra: prepare powergate.c for multiple variants
ARM: tegra: rework Tegra secondary CPU core bringup
ARM: tegra: functions to access the flowcontroller
ARM: tegra: initialize Tegra chipid early
ARM: tegra: export Tegra chipid
ARM: tegra: cleanup use of chipid register
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/arm/mach-tegra/dma.c')
| -rw-r--r-- | arch/arm/mach-tegra/dma.c | 128 |
1 file changed, 77 insertions, 51 deletions
diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c
index c0cf967e47d..abea4f6e2dd 100644
--- a/arch/arm/mach-tegra/dma.c
+++ b/arch/arm/mach-tegra/dma.c
@@ -33,6 +33,8 @@
 #include <mach/iomap.h>
 #include <mach/suspend.h>
 
+#include "apbio.h"
+
 #define APB_DMA_GEN			0x000
 #define GEN_ENABLE			(1<<31)
 
@@ -50,8 +52,6 @@
 #define CSR_ONCE			(1<<27)
 #define CSR_FLOW			(1<<21)
 #define CSR_REQ_SEL_SHIFT		16
-#define CSR_REQ_SEL_MASK		(0x1F<<CSR_REQ_SEL_SHIFT)
-#define CSR_REQ_SEL_INVALID		(31<<CSR_REQ_SEL_SHIFT)
 #define CSR_WCOUNT_SHIFT		2
 #define CSR_WCOUNT_MASK			0xFFFC
 
@@ -133,6 +133,7 @@ struct tegra_dma_channel {
 
 static bool tegra_dma_initialized;
 static DEFINE_MUTEX(tegra_dma_lock);
+static DEFINE_SPINLOCK(enable_lock);
 
 static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
 static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];
@@ -180,36 +181,94 @@ static void tegra_dma_stop(struct tegra_dma_channel *ch)
 
 static int tegra_dma_cancel(struct tegra_dma_channel *ch)
 {
-	u32 csr;
 	unsigned long irq_flags;
 
 	spin_lock_irqsave(&ch->lock, irq_flags);
 	while (!list_empty(&ch->list))
 		list_del(ch->list.next);
 
-	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
-	csr &= ~CSR_REQ_SEL_MASK;
-	csr |= CSR_REQ_SEL_INVALID;
-	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
-
 	tegra_dma_stop(ch);
 
 	spin_unlock_irqrestore(&ch->lock, irq_flags);
 	return 0;
 }
 
+static unsigned int get_channel_status(struct tegra_dma_channel *ch,
+			struct tegra_dma_req *req, bool is_stop_dma)
+{
+	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
+	unsigned int status;
+
+	if (is_stop_dma) {
+		/*
+		 * STOP the DMA and get the transfer count.
+		 * Getting the transfer count is tricky.
+		 *  - Globally disable DMA on all channels
+		 *  - Read the channel's status register to know the number
+		 *    of pending bytes to be transfered.
+		 *  - Stop the dma channel
+		 *  - Globally re-enable DMA to resume other transfers
+		 */
+		spin_lock(&enable_lock);
+		writel(0, addr + APB_DMA_GEN);
+		udelay(20);
+		status = readl(ch->addr + APB_DMA_CHAN_STA);
+		tegra_dma_stop(ch);
+		writel(GEN_ENABLE, addr + APB_DMA_GEN);
+		spin_unlock(&enable_lock);
+		if (status & STA_ISE_EOC) {
+			pr_err("Got Dma Int here clearing");
+			writel(status, ch->addr + APB_DMA_CHAN_STA);
+		}
+		req->status = TEGRA_DMA_REQ_ERROR_ABORTED;
+	} else {
+		status = readl(ch->addr + APB_DMA_CHAN_STA);
+	}
+	return status;
+}
+
+/* should be called with the channel lock held */
+static unsigned int dma_active_count(struct tegra_dma_channel *ch,
+			struct tegra_dma_req *req, unsigned int status)
+{
+	unsigned int to_transfer;
+	unsigned int req_transfer_count;
+	unsigned int bytes_transferred;
+
+	to_transfer = ((status & STA_COUNT_MASK) >> STA_COUNT_SHIFT) + 1;
+	req_transfer_count = ch->req_transfer_count + 1;
+	bytes_transferred = req_transfer_count;
+	if (status & STA_BUSY)
+		bytes_transferred -= to_transfer;
+	/*
+	 * In continuous transfer mode, DMA only tracks the count of the
+	 * half DMA buffer. So, if the DMA already finished half the DMA
+	 * then add the half buffer to the completed count.
+	 */
+	if (ch->mode & TEGRA_DMA_MODE_CONTINOUS) {
+		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
+			bytes_transferred += req_transfer_count;
+		if (status & STA_ISE_EOC)
+			bytes_transferred += req_transfer_count;
+	}
+	bytes_transferred *= 4;
+	return bytes_transferred;
+}
+
 int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
 	struct tegra_dma_req *_req)
 {
-	unsigned int csr;
 	unsigned int status;
 	struct tegra_dma_req *req = NULL;
 	int found = 0;
 	unsigned long irq_flags;
-	int to_transfer;
-	int req_transfer_count;
+	int stop = 0;
 
 	spin_lock_irqsave(&ch->lock, irq_flags);
+
+	if (list_entry(ch->list.next, struct tegra_dma_req, node) == _req)
+		stop = 1;
+
 	list_for_each_entry(req, &ch->list, node) {
 		if (req == _req) {
 			list_del(&req->node);
@@ -222,47 +281,12 @@ int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
 		return 0;
 	}
 
-	/* STOP the DMA and get the transfer count.
-	 * Getting the transfer count is tricky.
-	 *  - Change the source selector to invalid to stop the DMA from
-	 *    FIFO to memory.
-	 *  - Read the status register to know the number of pending
-	 *    bytes to be transferred.
-	 *  - Finally stop or program the DMA to the next buffer in the
-	 *    list.
-	 */
-	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
-	csr &= ~CSR_REQ_SEL_MASK;
-	csr |= CSR_REQ_SEL_INVALID;
-	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
-
-	/* Get the transfer count */
-	status = readl(ch->addr + APB_DMA_CHAN_STA);
-	to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
-	req_transfer_count = ch->req_transfer_count;
-	req_transfer_count += 1;
-	to_transfer += 1;
-
-	req->bytes_transferred = req_transfer_count;
-
-	if (status & STA_BUSY)
-		req->bytes_transferred -= to_transfer;
-
-	/* In continuous transfer mode, DMA only tracks the count of the
-	 * half DMA buffer. So, if the DMA already finished half the DMA
-	 * then add the half buffer to the completed count.
-	 *
-	 *	FIXME: There can be a race here. What if the req to
-	 *	dequue happens at the same time as the DMA just moved to
-	 *	the new buffer and SW didn't yet received the interrupt?
-	 */
-	if (ch->mode & TEGRA_DMA_MODE_CONTINOUS)
-		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
-			req->bytes_transferred += req_transfer_count;
+	if (!stop)
+		goto skip_stop_dma;
 
-	req->bytes_transferred *= 4;
+	status = get_channel_status(ch, req, true);
+	req->bytes_transferred = dma_active_count(ch, req, status);
 
-	tegra_dma_stop(ch);
 	if (!list_empty(&ch->list)) {
 		/* if the list is not empty, queue the next request */
 		struct tegra_dma_req *next_req;
@@ -270,6 +294,8 @@ int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
 			typeof(*next_req), node);
 		tegra_dma_update_hw(ch, next_req);
 	}
+
+skip_stop_dma:
 	req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;
 
 	spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -357,7 +383,7 @@ struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
 	int channel;
 	struct tegra_dma_channel *ch = NULL;
 
-	if (WARN_ON(!tegra_dma_initialized))
+	if (!tegra_dma_initialized)
 		return NULL;
 
 	mutex_lock(&tegra_dma_lock);
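The new dma_active_count() helper turns a raw channel status word into a count of bytes already transferred. As a rough illustration of that arithmetic, here is a small standalone C sketch; the STA_* bit positions, the helper names, and the sample numbers are assumptions made for this example, not values taken from the driver headers or the Tegra documentation.

```c
/*
 * Standalone sketch (not kernel code) of the byte-count arithmetic in
 * dma_active_count() above. Bit positions below are assumed for
 * illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

#define STA_BUSY	(1u << 31)	/* assumed: channel still transferring */
#define STA_ISE_EOC	(1u << 30)	/* assumed: end-of-transfer interrupt status */
#define STA_COUNT_SHIFT	2
#define STA_COUNT_MASK	0xFFFCu

/* Words still outstanding according to a status-register snapshot. */
static unsigned int words_remaining(unsigned int status)
{
	return ((status & STA_COUNT_MASK) >> STA_COUNT_SHIFT) + 1;
}

/*
 * Mirror the driver's logic: start from the programmed word count, subtract
 * what is still pending if the channel is busy, add a half-buffer for
 * continuous mode when the first half already completed, then convert
 * 32-bit words to bytes.
 */
static unsigned int bytes_done(unsigned int status, unsigned int req_words,
			       bool continuous, bool half_done)
{
	unsigned int words = req_words + 1;
	unsigned int done = words;

	if (status & STA_BUSY)
		done -= words_remaining(status);
	if (continuous) {
		if (half_done)
			done += words;
		if (status & STA_ISE_EOC)
			done += words;
	}
	return done * 4;
}

int main(void)
{
	/* hypothetical snapshot: channel busy, 16 words of a 64-word request pending */
	unsigned int status = STA_BUSY | (15u << STA_COUNT_SHIFT);

	printf("%u bytes transferred\n", bytes_done(status, 63, false, false));
	return 0;
}
```

With the hypothetical status above (channel busy, 16 of 64 programmed words still pending, one-shot mode), the sketch reports 48 completed words, i.e. 192 bytes.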