Diffstat (limited to 'drivers/usb/host/ohci-q.c')
-rw-r--r--  drivers/usb/host/ohci-q.c  262
1 file changed, 163 insertions(+), 99 deletions(-)
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index d4253e31942..1463c398d32 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -187,10 +187,6 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
ed->ed_prev = NULL;
ed->ed_next = NULL;
ed->hwNextED = 0;
- if (quirk_zfmicro(ohci)
- && (ed->type == PIPE_INTERRUPT)
- && !(ohci->eds_scheduled++))
- mod_timer(&ohci->unlink_watchdog, round_jiffies(jiffies + HZ));
wmb ();
/* we care about rm_list when setting CLE/BLE in case the HC was at
@@ -311,8 +307,7 @@ static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
* - ED_OPER: when there's any request queued, the ED gets rescheduled
* immediately. HC should be working on them.
*
- * - ED_IDLE: when there's no TD queue. there's no reason for the HC
- * to care about this ED; safe to disable the endpoint.
+ * - ED_IDLE: when there's no TD queue or the HC isn't running.
*
* When finish_unlinks() runs later, after SOF interrupt, it will often
* complete one or more URB unlinks before making that state change.
@@ -602,6 +597,8 @@ static void td_submit_urb (
u32 info = 0;
int is_out = usb_pipeout (urb->pipe);
int periodic = 0;
+ int i, this_sg_len, n;
+ struct scatterlist *sg;
/* OHCI handles the bulk/interrupt data toggles itself. We just
* use the device toggle bits for resetting, and rely on the fact
@@ -615,10 +612,24 @@ static void td_submit_urb (
list_add (&urb_priv->pending, &ohci->pending);
- if (data_len)
- data = urb->transfer_dma;
- else
- data = 0;
+ i = urb->num_mapped_sgs;
+ if (data_len > 0 && i > 0) {
+ sg = urb->sg;
+ data = sg_dma_address(sg);
+
+ /*
+ * urb->transfer_buffer_length may be smaller than the
+ * size of the scatterlist (or vice versa)
+ */
+ this_sg_len = min_t(int, sg_dma_len(sg), data_len);
+ } else {
+ sg = NULL;
+ if (data_len)
+ data = urb->transfer_dma;
+ else
+ data = 0;
+ this_sg_len = data_len;
+ }
/* NOTE: TD_CC is set so we can tell which TDs the HC processed by
* using TD_CC_GET, as well as by seeing them on the done list.
@@ -639,17 +650,29 @@ static void td_submit_urb (
? TD_T_TOGGLE | TD_CC | TD_DP_OUT
: TD_T_TOGGLE | TD_CC | TD_DP_IN;
/* TDs _could_ transfer up to 8K each */
- while (data_len > 4096) {
- td_fill (ohci, info, data, 4096, urb, cnt);
- data += 4096;
- data_len -= 4096;
+ for (;;) {
+ n = min(this_sg_len, 4096);
+
+ /* maybe avoid ED halt on final TD short read */
+ if (n >= data_len || (i == 1 && n >= this_sg_len)) {
+ if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
+ info |= TD_R;
+ }
+ td_fill(ohci, info, data, n, urb, cnt);
+ this_sg_len -= n;
+ data_len -= n;
+ data += n;
cnt++;
+
+ if (this_sg_len <= 0) {
+ if (--i <= 0 || data_len <= 0)
+ break;
+ sg = sg_next(sg);
+ data = sg_dma_address(sg);
+ this_sg_len = min_t(int, sg_dma_len(sg),
+ data_len);
+ }
}
- /* maybe avoid ED halt on final TD short read */
- if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
- info |= TD_R;
- td_fill (ohci, info, data, data_len, urb, cnt);
- cnt++;
if ((urb->transfer_flags & URB_ZERO_PACKET)
&& cnt < urb_priv->length) {
td_fill (ohci, info, 0, 0, urb, cnt);
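
The two hunks above teach td_submit_urb() to walk urb->sg instead of assuming one contiguous DMA buffer: the first mapped segment is clamped against the total transfer length, each segment is then split into TDs of at most 4096 bytes, and the walk stops when either the segment list or data_len runs out. Below is a standalone sketch of that chunking pattern, with plain arrays standing in for struct scatterlist and fill_chunk() standing in for td_fill(); the short-read (TD_R) and zero-length-packet handling are left out, and all names here are illustrative, not the driver's.

/*
 * Standalone sketch: split mapped segments into chunks of at most
 * 4096 bytes, stopping when either the segments or the overall
 * transfer length is exhausted.
 */
#include <stdio.h>

struct segment {
	unsigned long addr;	/* stand-in for sg_dma_address() */
	int len;		/* stand-in for sg_dma_len() */
};

static void fill_chunk(unsigned long addr, int len)
{
	printf("TD: addr=0x%lx len=%d\n", addr, len);
}

static int min_int(int a, int b)
{
	return a < b ? a : b;
}

int main(void)
{
	struct segment segs[] = {
		{ 0x10000, 6000 },
		{ 0x20000, 3000 },
		{ 0x30000, 8000 },
	};
	int nsegs = 3;
	int data_len = 12000;	/* may describe less than the segments do */
	int i = 0;
	int this_len = min_int(segs[0].len, data_len);
	unsigned long data = segs[0].addr;

	for (;;) {
		int n = min_int(this_len, 4096);

		fill_chunk(data, n);
		this_len -= n;
		data_len -= n;
		data += n;

		if (this_len <= 0) {
			if (++i >= nsegs || data_len <= 0)
				break;
			data = segs[i].addr;
			this_len = min_int(segs[i].len, data_len);
		}
	}
	return 0;
}

The clamp is reapplied on every segment because, as the added comment notes, the scatterlist may describe more memory than the URB actually transfers (or vice versa).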
@@ -869,13 +892,46 @@ static void ed_halted(struct ohci_hcd *ohci, struct td *td, int cc)
}
}
-/* replies to the request have to be on a FIFO basis so
- * we unreverse the hc-reversed done-list
- */
-static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
+/* Add a TD to the done list */
+static void add_to_done_list(struct ohci_hcd *ohci, struct td *td)
+{
+ struct td *td2, *td_prev;
+ struct ed *ed;
+
+ if (td->next_dl_td)
+ return; /* Already on the list */
+
+ /* Add all the TDs going back until we reach one that's on the list */
+ ed = td->ed;
+ td2 = td_prev = td;
+ list_for_each_entry_continue_reverse(td2, &ed->td_list, td_list) {
+ if (td2->next_dl_td)
+ break;
+ td2->next_dl_td = td_prev;
+ td_prev = td2;
+ }
+
+ if (ohci->dl_end)
+ ohci->dl_end->next_dl_td = td_prev;
+ else
+ ohci->dl_start = td_prev;
+
+ /*
+ * Make td->next_dl_td point to td itself, to mark the fact
+ * that td is on the done list.
+ */
+ ohci->dl_end = td->next_dl_td = td;
+
+ /* Did we just add the latest pending TD? */
+ td2 = ed->pending_td;
+ if (td2 && td2->next_dl_td)
+ ed->pending_td = NULL;
+}
+
+/* Get the entries on the hardware done queue and put them on our list */
+static void update_done_list(struct ohci_hcd *ohci)
{
u32 td_dma;
- struct td *td_rev = NULL;
struct td *td = NULL;
td_dma = hc32_to_cpup (ohci, &ohci->hcca->done_head);
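
add_to_done_list() above marks a TD as queued by making its next_dl_td pointer refer to the TD itself, so "on the list" can be tested with a plain NULL check while the tail element still carries a non-NULL link. A minimal standalone sketch of that self-pointer sentinel on a generic singly linked queue follows; the node type and helper names are illustrative, not the driver's.

/*
 * Standalone sketch: a node is on the queue iff node->next != NULL,
 * and the tail node's next pointer points back at the node itself.
 */
#include <assert.h>
#include <stddef.h>

struct node {
	struct node *next;	/* NULL = not on the queue */
};

static struct node *head, *tail;

static int on_list(struct node *n)
{
	return n->next != NULL;
}

static void append(struct node *n)
{
	if (on_list(n))
		return;		/* already queued */
	if (tail)
		tail->next = n;
	else
		head = n;
	tail = n;
	n->next = n;		/* self-pointer marks membership */
}

static struct node *pop(void)
{
	struct node *n = head;

	if (!n)
		return NULL;
	head = (n->next == n) ? NULL : n->next;
	if (!head)
		tail = NULL;
	n->next = NULL;		/* restore the "not queued" state */
	return n;
}

int main(void)
{
	struct node a = { NULL }, b = { NULL };

	append(&a);
	append(&b);
	append(&a);		/* no effect: already on the queue */
	assert(pop() == &a);
	assert(pop() == &b);
	assert(pop() == NULL);
	return 0;
}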
@@ -883,7 +939,7 @@ static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
wmb();
/* get TD from hc's singly linked list, and
- * prepend to ours. ed->td_list changes later.
+ * add to ours. ed->td_list changes later.
*/
while (td_dma) {
int cc;
@@ -905,19 +961,17 @@ static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
&& (td->ed->hwHeadP & cpu_to_hc32 (ohci, ED_H)))
ed_halted(ohci, td, cc);
- td->next_dl_td = td_rev;
- td_rev = td;
td_dma = hc32_to_cpup (ohci, &td->hwNextTD);
+ add_to_done_list(ohci, td);
}
- return td_rev;
}
/*-------------------------------------------------------------------------*/
/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
-static void
-finish_unlinks (struct ohci_hcd *ohci, u16 tick)
+static void finish_unlinks(struct ohci_hcd *ohci)
{
+ unsigned tick = ohci_frame_no(ohci);
struct ed *ed, **last;
rescan_all:
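
update_done_list() replaces dl_reverse_done_list(): the controller reports finished TDs through hcca->done_head as a chain of little-endian DMA addresses, so each step has to translate a bus address back into the driver's own struct td (the full driver uses a small hash for that lookup) before handing it to add_to_done_list(). A simplified sketch of walking such a hardware-style done queue, with a linear table standing in for the real translation and all names illustrative:

/*
 * Standalone sketch: follow a descriptor chain expressed as bus
 * addresses, translating each address back to a software structure.
 */
#include <stdint.h>
#include <stdio.h>

struct desc {
	uint32_t dma;		/* bus address of this descriptor */
	uint32_t hw_next;	/* bus address of the next one, 0 = end */
	int id;
};

static struct desc pool[3] = {
	{ 0x1000, 0x2000, 0 },
	{ 0x2000, 0x3000, 1 },
	{ 0x3000, 0x0000, 2 },
};

/* stand-in for the driver's dma-address-to-descriptor lookup */
static struct desc *dma_to_desc(uint32_t dma)
{
	for (unsigned i = 0; i < 3; i++)
		if (pool[i].dma == dma)
			return &pool[i];
	return NULL;
}

int main(void)
{
	uint32_t dma = 0x1000;	/* stand-in for hcca->done_head */

	while (dma) {
		struct desc *d = dma_to_desc(dma);

		if (!d)
			break;	/* bogus entry: bail out */
		printf("descriptor %d done\n", d->id);
		dma = d->hw_next;
	}
	return 0;
}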
@@ -926,41 +980,48 @@ rescan_all:
int completed, modified;
__hc32 *prev;
+ /* Is this ED already invisible to the hardware? */
+ if (ed->state == ED_IDLE)
+ goto ed_idle;
+
/* only take off EDs that the HC isn't using, accounting for
* frame counter wraps and EDs with partially retired TDs
*/
- if (likely(ohci->rh_state == OHCI_RH_RUNNING)) {
- if (tick_before (tick, ed->tick)) {
+ if (likely(ohci->rh_state == OHCI_RH_RUNNING) &&
+ tick_before(tick, ed->tick)) {
skip_ed:
- last = &ed->ed_next;
- continue;
- }
+ last = &ed->ed_next;
+ continue;
+ }
+ if (!list_empty(&ed->td_list)) {
+ struct td *td;
+ u32 head;
- if (!list_empty (&ed->td_list)) {
- struct td *td;
- u32 head;
-
- td = list_entry (ed->td_list.next, struct td,
- td_list);
- head = hc32_to_cpu (ohci, ed->hwHeadP) &
- TD_MASK;
-
- /* INTR_WDH may need to clean up first */
- if (td->td_dma != head) {
- if (ed == ohci->ed_to_check)
- ohci->ed_to_check = NULL;
- else
- goto skip_ed;
- }
- }
+ td = list_first_entry(&ed->td_list, struct td, td_list);
+
+ /* INTR_WDH may need to clean up first */
+ head = hc32_to_cpu(ohci, ed->hwHeadP) & TD_MASK;
+ if (td->td_dma != head &&
+ ohci->rh_state == OHCI_RH_RUNNING)
+ goto skip_ed;
+
+ /* Don't mess up anything already on the done list */
+ if (td->next_dl_td)
+ goto skip_ed;
}
+ /* ED's now officially unlinked, hc doesn't see */
+ ed->state = ED_IDLE;
+ ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
+ ed->hwNextED = 0;
+ wmb();
+ ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE);
+ed_idle:
+
/* reentrancy: if we drop the schedule lock, someone might
* have modified this list. normally it's just prepending
* entries (which we'd ignore), but paranoia won't hurt.
*/
- *last = ed->ed_next;
- ed->ed_next = NULL;
modified = 0;
/* unlink urbs as requested, but rescan the list after
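
The rewritten loop above still refuses to touch an ED until the frame counter has moved past ed->tick, "accounting for frame counter wraps". The OHCI frame number is only 16 bits wide, so "before" has to be decided by the sign of a 16-bit difference rather than by a plain less-than. A standalone illustration of that wrap-aware test (the same idea as the driver's tick_before(), not its exact macro):

/*
 * Standalone sketch: wrap-safe comparison of 16-bit frame numbers.
 */
#include <assert.h>
#include <stdint.h>

static int frame_before(uint16_t a, uint16_t b)
{
	return (int16_t)(a - b) < 0;
}

int main(void)
{
	assert(frame_before(10, 20));		/* ordinary case */
	assert(!frame_before(20, 10));
	assert(frame_before(0xfff0, 0x0005));	/* across the 16-bit wrap */
	assert(!frame_before(0x0005, 0xfff0));
	return 0;
}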
@@ -1018,19 +1079,21 @@ rescan_this:
if (completed && !list_empty (&ed->td_list))
goto rescan_this;
- /* ED's now officially unlinked, hc doesn't see */
- ed->state = ED_IDLE;
- if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT)
- ohci->eds_scheduled--;
- ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
- ed->hwNextED = 0;
- wmb ();
- ed->hwINFO &= ~cpu_to_hc32 (ohci, ED_SKIP | ED_DEQUEUE);
-
- /* but if there's work queued, reschedule */
- if (!list_empty (&ed->td_list)) {
- if (ohci->rh_state == OHCI_RH_RUNNING)
- ed_schedule (ohci, ed);
+ /*
+ * If no TDs are queued, take ED off the ed_rm_list.
+ * Otherwise, if the HC is running, reschedule.
+ * If not, leave it on the list for further dequeues.
+ */
+ if (list_empty(&ed->td_list)) {
+ *last = ed->ed_next;
+ ed->ed_next = NULL;
+ list_del(&ed->in_use_list);
+ } else if (ohci->rh_state == OHCI_RH_RUNNING) {
+ *last = ed->ed_next;
+ ed->ed_next = NULL;
+ ed_schedule(ohci, ed);
+ } else {
+ last = &ed->ed_next;
}
if (modified)
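
The new tail of finish_unlinks() walks ed_rm_list with a pointer-to-pointer cursor: "last" always addresses the link that reaches the current ED, so the ED can be dropped with *last = ed->ed_next or kept with last = &ed->ed_next, with no separate prev pointer needed for the case where the HC isn't running and the ED must stay queued. A minimal sketch of that traversal pattern on a generic list; the node type and keep() predicate are illustrative only.

/*
 * Standalone sketch: in-place filtering of a singly linked list using
 * a pointer to the link itself instead of a "prev" node.
 */
#include <stdio.h>

struct node {
	int val;
	struct node *next;
};

static int keep(struct node *n)
{
	return n->val % 2 == 0;		/* keep even values, drop odd ones */
}

static void filter(struct node **head)
{
	struct node **last = head;
	struct node *cur;

	while ((cur = *last) != NULL) {
		if (keep(cur))
			last = &cur->next;	/* leave it on the list */
		else
			*last = cur->next;	/* unlink it in place */
	}
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *head = &a;

	filter(&head);
	for (struct node *n = head; n; n = n->next)
		printf("%d\n", n->val);		/* prints only 2 */
	return 0;
}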
@@ -1082,12 +1145,7 @@ rescan_this:
/*-------------------------------------------------------------------------*/
-/*
- * Used to take back a TD from the host controller. This would normally be
- * called from within dl_done_list, however it may be called directly if the
- * HC no longer sees the TD and it has not appeared on the donelist (after
- * two frames). This bug has been observed on ZF Micro systems.
- */
+/* Take back a TD from the host controller */
static void takeback_td(struct ohci_hcd *ohci, struct td *td)
{
struct urb *urb = td->urb;
@@ -1134,37 +1192,43 @@ static void takeback_td(struct ohci_hcd *ohci, struct td *td)
*
* This is the main path for handing urbs back to drivers. The only other
* normal path is finish_unlinks(), which unlinks URBs using ed_rm_list,
- * instead of scanning the (re-reversed) donelist as this does. There's
- * an abnormal path too, handling a quirk in some Compaq silicon: URBs
- * with TDs that appear to be orphaned are directly reclaimed.
+ * instead of scanning the (re-reversed) donelist as this does.
*/
-static void
-dl_done_list (struct ohci_hcd *ohci)
+static void process_done_list(struct ohci_hcd *ohci)
{
- struct td *td = dl_reverse_done_list (ohci);
+ struct td *td;
- while (td) {
- struct td *td_next = td->next_dl_td;
- struct ed *ed = td->ed;
+ while (ohci->dl_start) {
+ td = ohci->dl_start;
+ if (td == ohci->dl_end)
+ ohci->dl_start = ohci->dl_end = NULL;
+ else
+ ohci->dl_start = td->next_dl_td;
- /*
- * Some OHCI controllers (NVIDIA for sure, maybe others)
- * occasionally forget to add TDs to the done queue. Since
- * TDs for a given endpoint are always processed in order,
- * if we find a TD on the donelist then all of its
- * predecessors must be finished as well.
- */
- for (;;) {
- struct td *td2;
+ takeback_td(ohci, td);
+ }
+}
- td2 = list_first_entry(&ed->td_list, struct td,
- td_list);
- if (td2 == td)
- break;
- takeback_td(ohci, td2);
- }
+/*
+ * TD takeback and URB giveback must be single-threaded.
+ * This routine takes care of it all.
+ */
+static void ohci_work(struct ohci_hcd *ohci)
+{
+ if (ohci->working) {
+ ohci->restart_work = 1;
+ return;
+ }
+ ohci->working = 1;
- takeback_td(ohci, td);
- td = td_next;
+ restart:
+ process_done_list(ohci);
+ if (ohci->ed_rm_list)
+ finish_unlinks(ohci);
+
+ if (ohci->restart_work) {
+ ohci->restart_work = 0;
+ goto restart;
}
+ ohci->working = 0;
}
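
ohci_work() serializes TD takeback and URB giveback with two flags rather than a lock of its own: a caller that arrives while the routine is already running only sets restart_work and returns, and the outer invocation keeps looping until no restart has been requested. A standalone sketch of that guard follows; in the driver these routines run under the host controller's spinlock, while this illustration has no locking and uses made-up names.

/*
 * Standalone sketch: a "working"/"restart" pair keeps the real work
 * from ever running reentrantly.
 */
#include <stdio.h>

static int working;
static int restart_work;
static int depth;

static void do_work(void);

/* pretend one step of the work re-enters the dispatcher */
static void process_items(void)
{
	printf("processing (pass %d)\n", depth);
	if (depth == 1)
		do_work();	/* nested call: only sets restart_work */
}

static void do_work(void)
{
	if (working) {
		restart_work = 1;
		return;
	}
	working = 1;
restart:
	depth++;
	process_items();
	if (restart_work) {
		restart_work = 0;
		goto restart;
	}
	working = 0;
}

int main(void)
{
	do_work();	/* runs two passes, never nested */
	return 0;
}

The pattern trades an occasional extra pass for the guarantee that process_done_list() and finish_unlinks() are never entered recursively.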