Commit c6fcb85e authored by Alan Stern, committed by Greg Kroah-Hartman
Browse files

USB: OHCI: redesign the TD done list



This patch changes the way ohci-hcd handles the TD done list.  In
addition to relying on the TD pointers stored by the controller
hardware, we need to handle TDs that the hardware has forgotten about.

This means the list has to exist even while the dl_done_list() routine
isn't running.  That function essentially gets split in two:
update_done_list() reads the TD pointers stored by the hardware and
adds the TDs to the done list, and process_done_list() scans through
the list to handle URB completions.  When we detect a TD that the
hardware forgot about, we will be able to add it to the done list
manually and then process it normally.

Since the list is really a queue, and because there can be a lot of
TDs, keep the existing singly linked implementation.  To ensure that
URBs are given back in order of submission, whenever a TD is added to
the done list, all the preceding TDs for the same endpoint must be
added as well (going back to the first one that isn't already on the
done list).

The done list manipulations must all be protected by the private
lock.  The scope of the lock is expanded in preparation for the
watchdog routine to be added in a later patch.

We have to be more careful about giving back unlinked URBs.  Since TDs
may be added to the done list by the watchdog routine and not in
response to a controller interrupt, we have to check explicitly to
make sure all the URB's TDs that were added to the done list have been
processed before giving back the URB.
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 8b3ab0ed
......@@ -780,24 +780,21 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
usb_hcd_resume_root_hub(hcd);
}
if (ints & OHCI_INTR_WDH) {
spin_lock (&ohci->lock);
dl_done_list (ohci);
spin_unlock (&ohci->lock);
}
spin_lock(&ohci->lock);
if (ints & OHCI_INTR_WDH)
update_done_list(ohci);
/* could track INTR_SO to reduce available PCI/... bandwidth */
/* handle any pending URB/ED unlinks, leaving INTR_SF enabled
* when there's still unlinking to be done (next frame).
*/
spin_lock (&ohci->lock);
process_done_list(ohci);
if (ohci->ed_rm_list)
finish_unlinks (ohci, ohci_frame_no(ohci));
if ((ints & OHCI_INTR_SF) != 0 && !ohci->ed_rm_list
&& ohci->rh_state == OHCI_RH_RUNNING)
ohci_writel (ohci, OHCI_INTR_SF, &regs->intrdisable);
spin_unlock (&ohci->lock);
if (ohci->rh_state == OHCI_RH_RUNNING) {
ohci_writel (ohci, ints, &regs->intrstatus);
......@@ -805,6 +802,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
// flush those writes
(void) ohci_readl (ohci, &ohci->regs->control);
}
spin_unlock(&ohci->lock);
return IRQ_HANDLED;
}
......
......@@ -39,7 +39,8 @@
#define OHCI_SCHED_ENABLES \
(OHCI_CTRL_CLE|OHCI_CTRL_BLE|OHCI_CTRL_PLE|OHCI_CTRL_IE)
static void dl_done_list (struct ohci_hcd *);
static void update_done_list(struct ohci_hcd *);
static void process_done_list(struct ohci_hcd *);
static void finish_unlinks (struct ohci_hcd *, u16);
#ifdef CONFIG_PM
......@@ -87,7 +88,8 @@ __acquires(ohci->lock)
msleep (8);
spin_lock_irq (&ohci->lock);
}
dl_done_list (ohci);
update_done_list(ohci);
process_done_list(ohci);
finish_unlinks (ohci, ohci_frame_no(ohci));
/*
......
......@@ -892,13 +892,41 @@ static void ed_halted(struct ohci_hcd *ohci, struct td *td, int cc)
}
}
/* replies to the request have to be on a FIFO basis so
* we unreverse the hc-reversed done-list
*/
static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
/* Add a TD to the done list */
static void add_to_done_list(struct ohci_hcd *ohci, struct td *td)
{
struct td *td2, *td_prev;
struct ed *ed;
/* A TD that is already on the done list has a non-NULL next_dl_td
 * (the tail entry points to itself -- see the marker assignment at
 * the bottom of this function), so this test detects membership.
 */
if (td->next_dl_td)
return; /* Already on the list */
/* Add all the TDs going back until we reach one that's on the list */
ed = td->ed;
td2 = td_prev = td;
/* Walk backward through this endpoint's td_list starting just before
 * td, chaining each not-yet-listed predecessor in front of its
 * successor.  This keeps URB givebacks in submission order, covering
 * TDs the controller hardware forgot to report (see commit message).
 */
list_for_each_entry_continue_reverse(td2, &ed->td_list, td_list) {
if (td2->next_dl_td)
break;
td2->next_dl_td = td_prev;
td_prev = td2;
}
/* Append the collected chain (td_prev .. td) to the tail of the
 * private done list, or start a new list if it is empty.
 * NOTE(review): callers must hold ohci->lock -- the commit message
 * says all done-list manipulation is under the private lock; confirm.
 */
if (ohci->dl_end)
ohci->dl_end->next_dl_td = td_prev;
else
ohci->dl_start = td_prev;
/*
 * Make td->next_dl_td point to td itself, to mark the fact
 * that td is on the done list.
 */
ohci->dl_end = td->next_dl_td = td;
}
/* Get the entries on the hardware done queue and put them on our list */
static void update_done_list(struct ohci_hcd *ohci)
{
u32 td_dma;
struct td *td_rev = NULL;
struct td *td = NULL;
td_dma = hc32_to_cpup (ohci, &ohci->hcca->done_head);
......@@ -906,7 +934,7 @@ static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
wmb();
/* get TD from hc's singly linked list, and
* prepend to ours. ed->td_list changes later.
* add to ours. ed->td_list changes later.
*/
while (td_dma) {
int cc;
......@@ -928,11 +956,9 @@ static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
&& (td->ed->hwHeadP & cpu_to_hc32 (ohci, ED_H)))
ed_halted(ohci, td, cc);
td->next_dl_td = td_rev;
td_rev = td;
td_dma = hc32_to_cpup (ohci, &td->hwNextTD);
add_to_done_list(ohci, td);
}
return td_rev;
}
/*-------------------------------------------------------------------------*/
......@@ -956,26 +982,27 @@ rescan_all:
/* only take off EDs that the HC isn't using, accounting for
* frame counter wraps and EDs with partially retired TDs
*/
if (likely(ohci->rh_state == OHCI_RH_RUNNING)) {
if (tick_before (tick, ed->tick)) {
if (likely(ohci->rh_state == OHCI_RH_RUNNING) &&
tick_before(tick, ed->tick)) {
skip_ed:
last = &ed->ed_next;
continue;
}
last = &ed->ed_next;
continue;
}
if (!list_empty(&ed->td_list)) {
struct td *td;
u32 head;
if (!list_empty (&ed->td_list)) {
struct td *td;
u32 head;
td = list_first_entry(&ed->td_list, struct td, td_list);
td = list_entry (ed->td_list.next, struct td,
td_list);
head = hc32_to_cpu (ohci, ed->hwHeadP) &
TD_MASK;
/* INTR_WDH may need to clean up first */
head = hc32_to_cpu(ohci, ed->hwHeadP) & TD_MASK;
if (td->td_dma != head &&
ohci->rh_state == OHCI_RH_RUNNING)
goto skip_ed;
/* INTR_WDH may need to clean up first */
if (td->td_dma != head)
goto skip_ed;
}
/* Don't mess up anything already on the done list */
if (td->next_dl_td)
goto skip_ed;
}
/* ED's now officially unlinked, hc doesn't see */
......@@ -1161,33 +1188,17 @@ static void takeback_td(struct ohci_hcd *ohci, struct td *td)
* normal path is finish_unlinks(), which unlinks URBs using ed_rm_list,
* instead of scanning the (re-reversed) donelist as this does.
*/
/* NOTE(review): this span is a diff fragment with the +/- markers lost:
 * lines of the removed dl_done_list() implementation (the
 * dl_reverse_done_list() call, the "for (;;)" predecessor loop, and the
 * NVIDIA workaround comment) are interleaved with the new
 * process_done_list().  Only the "while (ohci->dl_start)" loop is the
 * new code; it is not valid C as written here.
 */
static void
dl_done_list (struct ohci_hcd *ohci)
static void process_done_list(struct ohci_hcd *ohci)
{
struct td *td = dl_reverse_done_list (ohci);
while (td) {
struct td *td_next = td->next_dl_td;
struct ed *ed = td->ed;
struct td *td;
/*
 * Some OHCI controllers (NVIDIA for sure, maybe others)
 * occasionally forget to add TDs to the done queue. Since
 * TDs for a given endpoint are always processed in order,
 * if we find a TD on the donelist then all of its
 * predecessors must be finished as well.
 */
for (;;) {
struct td *td2;
td2 = list_first_entry(&ed->td_list, struct td,
td_list);
if (td2 == td)
break;
takeback_td(ohci, td2);
}
/* New implementation: drain the private done list head-first,
 * giving back each TD in order; when removing the last entry,
 * clear both head and tail pointers so the list reads as empty.
 */
while (ohci->dl_start) {
td = ohci->dl_start;
if (td == ohci->dl_end)
ohci->dl_start = ohci->dl_end = NULL;
else
ohci->dl_start = td->next_dl_td;
takeback_td(ohci, td);
td = td_next;
}
}
......@@ -380,6 +380,7 @@ struct ohci_hcd {
struct dma_pool *td_cache;
struct dma_pool *ed_cache;
struct td *td_hash [TD_HASH_SIZE];
struct td *dl_start, *dl_end; /* the done list */
struct list_head pending;
/*
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment