Diffstat (limited to 'drivers/tty/serial/amba-pl011.c')
-rw-r--r-- | drivers/tty/serial/amba-pl011.c | 181
1 file changed, 158 insertions, 23 deletions
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 1c1942b5cee..8ab70a62091 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -29,6 +29,7 @@
  * and hooked into this driver.
  */
+
 #if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
 #define SUPPORT_SYSRQ
 #endif
@@ -72,32 +73,44 @@
 /* There is by now at least one vendor with differing details, so handle it */
 struct vendor_data {
 	unsigned int		ifls;
-	unsigned int		fifosize;
 	unsigned int		lcrh_tx;
 	unsigned int		lcrh_rx;
 	bool			oversampling;
 	bool			dma_threshold;
 	bool			cts_event_workaround;
+
+	unsigned int (*get_fifosize)(unsigned int periphid);
 };
 
+static unsigned int get_fifosize_arm(unsigned int periphid)
+{
+	unsigned int rev = (periphid >> 20) & 0xf;
+	return rev < 3 ? 16 : 32;
+}
+
 static struct vendor_data vendor_arm = {
 	.ifls			= UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
-	.fifosize		= 16,
 	.lcrh_tx		= UART011_LCRH,
 	.lcrh_rx		= UART011_LCRH,
 	.oversampling		= false,
 	.dma_threshold		= false,
 	.cts_event_workaround	= false,
+	.get_fifosize		= get_fifosize_arm,
 };
 
+static unsigned int get_fifosize_st(unsigned int periphid)
+{
+	return 64;
+}
+
 static struct vendor_data vendor_st = {
 	.ifls			= UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
-	.fifosize		= 64,
 	.lcrh_tx		= ST_UART011_LCRH_TX,
 	.lcrh_rx		= ST_UART011_LCRH_RX,
 	.oversampling		= true,
 	.dma_threshold		= true,
 	.cts_event_workaround	= true,
+	.get_fifosize		= get_fifosize_st,
 };
 
 static struct uart_amba_port *amba_ports[UART_NR];
@@ -117,6 +130,12 @@ struct pl011_dmarx_data {
 	struct pl011_sgbuf	sgbuf_b;
 	dma_cookie_t		cookie;
 	bool			running;
+	struct timer_list	timer;
+	unsigned int last_residue;
+	unsigned long last_jiffies;
+	bool auto_poll_rate;
+	unsigned int poll_rate;
+	unsigned int poll_timeout;
 };
 
 struct pl011_dmatx_data {
@@ -223,16 +242,18 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
 static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
 	enum dma_data_direction dir)
 {
-	sg->buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
+	dma_addr_t dma_addr;
+
+	sg->buf = dma_alloc_coherent(chan->device->dev,
+		PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
 	if (!sg->buf)
 		return -ENOMEM;
 
-	sg_init_one(&sg->sg, sg->buf, PL011_DMA_BUFFER_SIZE);
+	sg_init_table(&sg->sg, 1);
+	sg_set_page(&sg->sg, phys_to_page(dma_addr),
+		PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
+	sg_dma_address(&sg->sg) = dma_addr;
 
-	if (dma_map_sg(chan->device->dev, &sg->sg, 1, dir) != 1) {
-		kfree(sg->buf);
-		return -EINVAL;
-	}
 	return 0;
 }
 
@@ -240,8 +261,9 @@ static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
 	enum dma_data_direction dir)
 {
 	if (sg->buf) {
-		dma_unmap_sg(chan->device->dev, &sg->sg, 1, dir);
-		kfree(sg->buf);
+		dma_free_coherent(chan->device->dev,
+			PL011_DMA_BUFFER_SIZE, sg->buf,
+			sg_dma_address(&sg->sg));
 	}
 }
 
@@ -310,6 +332,29 @@ static void pl011_dma_probe_initcall(struct device *dev, struct uart_amba_port *
 		dmaengine_slave_config(chan, &rx_conf);
 		uap->dmarx.chan = chan;
 
+		if (plat->dma_rx_poll_enable) {
+			/* Set poll rate if specified. */
+			if (plat->dma_rx_poll_rate) {
+				uap->dmarx.auto_poll_rate = false;
+				uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
+			} else {
+				/*
+				 * 100 ms defaults to poll rate if not
+				 * specified. This will be adjusted with
+				 * the baud rate at set_termios.
+				 */
+				uap->dmarx.auto_poll_rate = true;
+				uap->dmarx.poll_rate = 100;
+			}
+			/* 3 secs defaults poll_timeout if not specified. */
+			if (plat->dma_rx_poll_timeout)
+				uap->dmarx.poll_timeout =
+					plat->dma_rx_poll_timeout;
+			else
+				uap->dmarx.poll_timeout = 3000;
+		} else
+			uap->dmarx.auto_poll_rate = false;
+
 		dev_info(uap->port.dev, "DMA channel RX %s\n",
 			 dma_chan_name(uap->dmarx.chan));
 	}
@@ -713,24 +758,30 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
 	struct tty_port *port = &uap->port.state->port;
 	struct pl011_sgbuf *sgbuf = use_buf_b ?
 		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
-	struct device *dev = uap->dmarx.chan->device->dev;
 	int dma_count = 0;
 	u32 fifotaken = 0; /* only used for vdbg() */
 
-	/* Pick everything from the DMA first */
+	struct pl011_dmarx_data *dmarx = &uap->dmarx;
+	int dmataken = 0;
+
+	if (uap->dmarx.poll_rate) {
+		/* The data can be taken by polling */
+		dmataken = sgbuf->sg.length - dmarx->last_residue;
+		/* Recalculate the pending size */
+		if (pending >= dmataken)
+			pending -= dmataken;
+	}
+
+	/* Pick the remain data from the DMA */
 	if (pending) {
-		/* Sync in buffer */
-		dma_sync_sg_for_cpu(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
 
 		/*
 		 * First take all chars in the DMA pipe, then look in the FIFO.
 		 * Note that tty_insert_flip_buf() tries to take as many chars
 		 * as it can.
 		 */
-		dma_count = tty_insert_flip_string(port, sgbuf->buf, pending);
-
-		/* Return buffer to device */
-		dma_sync_sg_for_device(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
+		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
+				pending);
 
 		uap->port.icount.rx += dma_count;
 		if (dma_count < pending)
@@ -738,6 +789,10 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
 				 "couldn't insert all characters (TTY is full?)\n");
 	}
 
+	/* Reset the last_residue for Rx DMA poll */
+	if (uap->dmarx.poll_rate)
+		dmarx->last_residue = sgbuf->sg.length;
+
 	/*
 	 * Only continue with trying to read the FIFO if all DMA chars have
 	 * been taken first.
@@ -877,6 +932,57 @@ static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
 	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
 }
 
+/*
+ * Timer handler for Rx DMA polling.
+ * Every polling, It checks the residue in the dma buffer and transfer
+ * data to the tty. Also, last_residue is updated for the next polling.
+ */
+static void pl011_dma_rx_poll(unsigned long args)
+{
+	struct uart_amba_port *uap = (struct uart_amba_port *)args;
+	struct tty_port *port = &uap->port.state->port;
+	struct pl011_dmarx_data *dmarx = &uap->dmarx;
+	struct dma_chan *rxchan = uap->dmarx.chan;
+	unsigned long flags = 0;
+	unsigned int dmataken = 0;
+	unsigned int size = 0;
+	struct pl011_sgbuf *sgbuf;
+	int dma_count;
+	struct dma_tx_state state;
+
+	sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
+	if (likely(state.residue < dmarx->last_residue)) {
+		dmataken = sgbuf->sg.length - dmarx->last_residue;
+		size = dmarx->last_residue - state.residue;
+		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
+				size);
+		if (dma_count == size)
+			dmarx->last_residue = state.residue;
+		dmarx->last_jiffies = jiffies;
+	}
+	tty_flip_buffer_push(port);
+
+	/*
+	 * If no data is received in poll_timeout, the driver will fall back
+	 * to interrupt mode. We will retrigger DMA at the first interrupt.
+	 */
+	if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
+			> uap->dmarx.poll_timeout) {
+
+		spin_lock_irqsave(&uap->port.lock, flags);
+		pl011_dma_rx_stop(uap);
+		spin_unlock_irqrestore(&uap->port.lock, flags);
+
+		uap->dmarx.running = false;
+		dmaengine_terminate_all(rxchan);
+		del_timer(&uap->dmarx.timer);
+	} else {
+		mod_timer(&uap->dmarx.timer,
+			jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
+	}
+}
+
 static void pl011_dma_startup(struct uart_amba_port *uap)
 {
 	int ret;
@@ -939,6 +1045,16 @@ skip_rx:
 		if (pl011_dma_rx_trigger_dma(uap))
 			dev_dbg(uap->port.dev, "could not trigger initial "
 				"RX DMA job, fall back to interrupt mode\n");
+		if (uap->dmarx.poll_rate) {
+			init_timer(&(uap->dmarx.timer));
+			uap->dmarx.timer.function = pl011_dma_rx_poll;
+			uap->dmarx.timer.data = (unsigned long)uap;
+			mod_timer(&uap->dmarx.timer,
+				jiffies +
+				msecs_to_jiffies(uap->dmarx.poll_rate));
+			uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
+			uap->dmarx.last_jiffies = jiffies;
+		}
 	}
 }
 
@@ -974,6 +1090,8 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
 		/* Clean up the RX DMA */
 		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
 		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
+		if (uap->dmarx.poll_rate)
+			del_timer_sync(&uap->dmarx.timer);
 		uap->using_rx_dma = false;
 	}
 }
@@ -988,7 +1106,6 @@ static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
 	return uap->using_rx_dma && uap->dmarx.running;
 }
 
-
 #else
 /* Blank functions if the DMA engine is not available */
 static inline void pl011_dma_probe(struct device *dev, struct uart_amba_port *uap)
@@ -1100,8 +1217,20 @@ static void pl011_rx_chars(struct uart_amba_port *uap)
 			dev_dbg(uap->port.dev, "could not trigger RX DMA job "
 				"fall back to interrupt mode again\n");
 			uap->im |= UART011_RXIM;
-		} else
+		} else {
 			uap->im &= ~UART011_RXIM;
+#ifdef CONFIG_DMA_ENGINE
+			/* Start Rx DMA poll */
+			if (uap->dmarx.poll_rate) {
+				uap->dmarx.last_jiffies = jiffies;
+				uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
+				mod_timer(&uap->dmarx.timer,
+					jiffies +
+					msecs_to_jiffies(uap->dmarx.poll_rate));
+			}
+#endif
+		}
+
 		writew(uap->im, uap->port.membase + UART011_IMSC);
 	}
 	spin_lock(&uap->port.lock);
@@ -1176,7 +1305,6 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
 	unsigned int dummy_read;
 
 	spin_lock_irqsave(&uap->port.lock, flags);
-
 	status = readw(uap->port.membase + UART011_MIS);
 	if (status) {
 		do {
@@ -1563,6 +1691,13 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
 	 */
 	baud = uart_get_baud_rate(port, termios, old, 0,
 				  port->uartclk / clkdiv);
+#ifdef CONFIG_DMA_ENGINE
+	/*
+	 * Adjust RX DMA polling rate with baud rate if not specified.
+	 */
+	if (uap->dmarx.auto_poll_rate)
+		uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
+#endif
 
 	if (baud > port->uartclk/16)
 		quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
@@ -2022,7 +2157,7 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
 	uap->lcrh_rx = vendor->lcrh_rx;
 	uap->lcrh_tx = vendor->lcrh_tx;
 	uap->old_cr = 0;
-	uap->fifosize = vendor->fifosize;
+	uap->fifosize = vendor->get_fifosize(dev->periphid);
 	uap->port.dev = &dev->dev;
 	uap->port.mapbase = dev->res.start;
 	uap->port.membase = base;
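Note: the new RX polling path is configured entirely through platform data; pl011_dma_probe_initcall() reads dma_rx_poll_enable, dma_rx_poll_rate and dma_rx_poll_timeout from the port's amba_pl011_data. Below is a minimal board-file sketch of how a platform might opt in; the variable name is a placeholder and is not part of this diff.

/*
 * Hypothetical platform-data sketch (not part of this diff): enabling
 * PL011 RX DMA buffer polling.  The three .dma_rx_poll_* fields follow
 * the plat->dma_rx_poll_* accesses added above; the pre-existing
 * .dma_filter/.dma_rx_param members still have to be set for the driver
 * to request a DMA channel at all.
 */
#include <linux/amba/serial.h>

static struct amba_pl011_data uart0_pdata = {
	/* .dma_filter, .dma_rx_param, .dma_tx_param set up as before ... */
	.dma_rx_poll_enable  = true,	/* drain the RX buffer from a timer */
	.dma_rx_poll_rate    = 0,	/* 0: start at 100 ms, then derived from
					 * the baud rate in pl011_set_termios() */
	.dma_rx_poll_timeout = 3000,	/* fall back to interrupt mode after
					 * 3 s without received data */
};

With the rate left at zero, the driver later sets poll_rate = DIV_ROUND_UP(10000000, baud) ms, which works out to roughly the time needed to receive about 1000 characters at ~10 bits per character, so the timer fires well before the DMA buffer can fill.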