-rw-r--r--  drivers/usb/musb/musb_host.c  154
1 file changed, 92 insertions(+), 62 deletions(-)
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 29b5149f575..2f6f9a89802 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -892,6 +892,73 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
	}
}

+
+/* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to
+ * the end; avoids starvation for other endpoints.
+ */
+static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
+				  int is_in)
+{
+	struct dma_channel	*dma;
+	struct urb		*urb;
+	void __iomem		*mbase = musb->mregs;
+	void __iomem		*epio = ep->regs;
+	struct musb_qh		*cur_qh, *next_qh;
+	u16			rx_csr, tx_csr;
+
+	musb_ep_select(mbase, ep->epnum);
+	if (is_in) {
+		dma = is_dma_capable() ? ep->rx_channel : NULL;
+
+		/* clear nak timeout bit */
+		rx_csr = musb_readw(epio, MUSB_RXCSR);
+		rx_csr |= MUSB_RXCSR_H_WZC_BITS;
+		rx_csr &= ~MUSB_RXCSR_DATAERROR;
+		musb_writew(epio, MUSB_RXCSR, rx_csr);
+
+		cur_qh = first_qh(&musb->in_bulk);
+	} else {
+		dma = is_dma_capable() ? ep->tx_channel : NULL;
+
+		/* clear nak timeout bit */
+		tx_csr = musb_readw(epio, MUSB_TXCSR);
+		tx_csr |= MUSB_TXCSR_H_WZC_BITS;
+		tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
+		musb_writew(epio, MUSB_TXCSR, tx_csr);
+
+		cur_qh = first_qh(&musb->out_bulk);
+	}
+	if (cur_qh) {
+		urb = next_urb(cur_qh);
+		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+			musb->dma_controller->channel_abort(dma);
+			urb->actual_length += dma->actual_len;
+			dma->actual_len = 0L;
+		}
+		musb_save_toggle(cur_qh, is_in, urb);
+
+		if (is_in) {
+			/* move cur_qh to end of queue */
+			list_move_tail(&cur_qh->ring, &musb->in_bulk);
+
+			/* get the next qh from musb->in_bulk */
+			next_qh = first_qh(&musb->in_bulk);
+
+			/* set rx_reinit and schedule the next qh */
+			ep->rx_reinit = 1;
+		} else {
+			/* move cur_qh to end of queue */
+			list_move_tail(&cur_qh->ring, &musb->out_bulk);
+
+			/* get the next qh from musb->out_bulk */
+			next_qh = first_qh(&musb->out_bulk);
+
+			/* set tx_reinit and schedule the next qh */
+			ep->tx_reinit = 1;
+		}
+		musb_start_urb(musb, is_in, next_qh);
+	}
+}
+
/*
 * Service the default endpoint (ep0) as host.
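The helper added above is the standard list_move_tail() round-robin on the driver's bulk ring: the qh that just NAK-timed out is demoted to the tail and whatever is now at the head gets scheduled. A minimal sketch of just that rotation step, written against first_qh() and the qh->ring member that already exist in musb_host.c (the function name rotate_bulk_queue is illustrative only, not part of the patch):

/* Rotate a bulk list after a NAK timeout: demote the offending qh and
 * return the qh that should run next (illustrative sketch, not driver code).
 */
static struct musb_qh *rotate_bulk_queue(struct list_head *bulk_list)
{
	struct musb_qh *cur_qh = first_qh(bulk_list);	/* qh that NAK-timed out */

	if (!cur_qh)
		return NULL;

	/* move it to the tail so it cannot starve the other endpoints ... */
	list_move_tail(&cur_qh->ring, bulk_list);

	/* ... and pick up whichever qh is now at the head of the list */
	return first_qh(bulk_list);
}

musb_bulk_nak_timeout() does exactly this for either &musb->in_bulk or &musb->out_bulk, plus the CSR and DMA cleanup shown above.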
@@ -1156,21 +1223,28 @@ void musb_host_tx(struct musb *musb, u8 epnum)
		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
-		dev_dbg(musb->controller, "TX end=%d device not responding\n", epnum);
-
-		/* NOTE: this code path would be a good place to PAUSE a
-		 * transfer, if there's some other (nonperiodic) tx urb
-		 * that could use this fifo. (dma complicates it...)
-		 * That's already done for bulk RX transfers.
-		 *
-		 * if (bulk && qh->ring.next != &musb->out_bulk), then
-		 * we have a candidate... NAKing is *NOT* an error
-		 */
-		musb_ep_select(mbase, epnum);
-		musb_writew(epio, MUSB_TXCSR,
-				MUSB_TXCSR_H_WZC_BITS
-				| MUSB_TXCSR_TXPKTRDY);
-		return;
+		if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
+				&& !list_is_singular(&musb->out_bulk)) {
+			dev_dbg(musb->controller,
+				"NAK timeout on TX%d ep\n", epnum);
+			musb_bulk_nak_timeout(musb, hw_ep, 0);
+		} else {
+			dev_dbg(musb->controller,
+				"TX end=%d device not responding\n", epnum);
+			/* NOTE: this code path would be a good place to PAUSE a
+			 * transfer, if there's some other (nonperiodic) tx urb
+			 * that could use this fifo. (dma complicates it...)
+			 * That's already done for bulk RX transfers.
+			 *
+			 * if (bulk && qh->ring.next != &musb->out_bulk), then
+			 * we have a candidate... NAKing is *NOT* an error
+			 */
+			musb_ep_select(mbase, epnum);
+			musb_writew(epio, MUSB_TXCSR,
+					MUSB_TXCSR_H_WZC_BITS
+					| MUSB_TXCSR_TXPKTRDY);
+		}
+		return;
	}

	if (status) {
@@ -1390,50 +1464,6 @@ void musb_host_tx(struct musb *musb, u8 epnum)
#endif
-/* Schedule next QH from musb->in_bulk and move the current qh to
- * the end; avoids starvation for other endpoints.
- */
-static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
-{
-	struct dma_channel	*dma;
-	struct urb		*urb;
-	void __iomem		*mbase = musb->mregs;
-	void __iomem		*epio = ep->regs;
-	struct musb_qh		*cur_qh, *next_qh;
-	u16			rx_csr;
-
-	musb_ep_select(mbase, ep->epnum);
-	dma = is_dma_capable() ? ep->rx_channel : NULL;
-
-	/* clear nak timeout bit */
-	rx_csr = musb_readw(epio, MUSB_RXCSR);
-	rx_csr |= MUSB_RXCSR_H_WZC_BITS;
-	rx_csr &= ~MUSB_RXCSR_DATAERROR;
-	musb_writew(epio, MUSB_RXCSR, rx_csr);
-
-	cur_qh = first_qh(&musb->in_bulk);
-	if (cur_qh) {
-		urb = next_urb(cur_qh);
-		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
-			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
-			musb->dma_controller->channel_abort(dma);
-			urb->actual_length += dma->actual_len;
-			dma->actual_len = 0L;
-		}
-		musb_save_toggle(cur_qh, 1, urb);
-
-		/* move cur_qh to end of queue */
-		list_move_tail(&cur_qh->ring, &musb->in_bulk);
-
-		/* get the next qh from musb->in_bulk */
-		next_qh = first_qh(&musb->in_bulk);
-
-		/* set rx_reinit and schedule the next qh */
-		ep->rx_reinit = 1;
-		musb_start_urb(musb, 1, next_qh);
-	}
-}
-
/*
 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
 * and high-bandwidth IN transfer cases.
@@ -1510,7 +1540,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
			if (usb_pipebulk(urb->pipe)
					&& qh->mux == 1
					&& !list_is_singular(&musb->in_bulk)) {
-				musb_bulk_rx_nak_timeout(musb, hw_ep);
+				musb_bulk_nak_timeout(musb, hw_ep, 1);
				return;
			}
			musb_ep_select(mbase, epnum);
@@ -1873,14 +1903,14 @@ static int musb_schedule(
		else
			head = &musb->out_bulk;

-		/* Enable bulk RX NAK timeout scheme when bulk requests are
+		/* Enable bulk RX/TX NAK timeout scheme when bulk requests are
		 * multiplexed. This scheme doesn't work in a high speed to full
		 * speed scenario, as NAK interrupts do not come from a
		 * full speed device connected to a high speed device.
		 * NAK timeout interval is 8 (128 uframes or 16 ms) for HS and
		 * 4 (8 frames or 8 ms) for an FS device.
		 */
-		if (is_in && qh->dev)
+		if (qh->dev)
			qh->intv_reg =
				(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
		goto success;
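As a sanity check on the intervals quoted in the comment above: assuming the usual MUSB bulk NAK-limit convention that a programmed value m corresponds to 2^(m-1) frames (FS, 1 ms each) or microframes (HS, 125 us each), the two values chosen here reproduce exactly the figures in the comment. A small standalone sketch of that arithmetic (nak_timeout_us() is illustrative, not driver code):

#include <stdio.h>

/* 2^(m-1) (micro)frames, converted to microseconds */
static unsigned int nak_timeout_us(unsigned int m, int high_speed)
{
	unsigned int units = 1U << (m - 1);		/* number of (micro)frames */
	unsigned int unit_us = high_speed ? 125 : 1000;	/* uframe vs. frame length */

	return units * unit_us;
}

int main(void)
{
	printf("HS, m=8: %u us\n", nak_timeout_us(8, 1));	/* 16000 us = 16 ms */
	printf("FS, m=4: %u us\n", nak_timeout_us(4, 0));	/*  8000 us =  8 ms */
	return 0;
}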