path: root/drivers/usb/host/xhci-ring.c
Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r--  drivers/usb/host/xhci-ring.c  420
1 file changed, 212 insertions(+), 208 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 965bffce301e..5d64c297721c 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -204,6 +204,50 @@ void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
}
/*
+ * If enqueue points at a link TRB, follow links until an ordinary TRB is reached.
+ * Toggle the cycle bit of passed link TRBs and optionally chain them.
+ */
+static void inc_enq_past_link(struct xhci_hcd *xhci, struct xhci_ring *ring, u32 chain)
+{
+ unsigned int link_trb_count = 0;
+
+ while (trb_is_link(ring->enqueue)) {
+
+ /*
+ * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
+ * set, but other sections talk about dealing with the chain bit set. This was
+ * fixed in the 0.96 specification errata, but we have to assume that all 0.95
+ * xHCI hardware can't handle the chain bit being cleared on a link TRB.
+ *
+ * On 0.95 and some 0.96 HCs the chain bit is set once at segment initialization
+ * and never changed here. On all others, modify it as requested by the caller.
+ */
+ if (!xhci_link_chain_quirk(xhci, ring->type)) {
+ ring->enqueue->link.control &= cpu_to_le32(~TRB_CHAIN);
+ ring->enqueue->link.control |= cpu_to_le32(chain);
+ }
+
+ /* Give this link TRB to the hardware */
+ wmb();
+ ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
+
+ /* Toggle the cycle bit after the last ring segment. */
+ if (link_trb_toggles_cycle(ring->enqueue))
+ ring->cycle_state ^= 1;
+
+ ring->enq_seg = ring->enq_seg->next;
+ ring->enqueue = ring->enq_seg->trbs;
+
+ trace_xhci_inc_enq(ring);
+
+ if (link_trb_count++ > ring->num_segs) {
+ xhci_warn(xhci, "Link TRB loop at enqueue\n");
+ break;
+ }
+ }
+}
+
+/*
* See Cycle bit rules. SW is the consumer for the event ring only.
*
* If we've just enqueued a TRB that is in the middle of a TD (meaning the
@@ -211,11 +255,6 @@ void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
* If we've enqueued the last TRB in a TD, make sure the following link TRBs
* have their chain bit cleared (so that each Link TRB is a separate TD).
*
- * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
- * set, but other sections talk about dealing with the chain bit set. This was
- * fixed in the 0.96 specification errata, but we have to assume that all 0.95
- * xHCI hardware can't handle the chain bit being cleared on a link TRB.
- *
* @more_trbs_coming: Will you enqueue more TRBs before calling
* prepare_transfer()?
*/
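
For reference, inc_enq_past_link() leans on two small helpers defined in xhci.h (outside this diff); a paraphrased sketch of what they test, not verbatim source:

	static inline bool trb_is_link(union xhci_trb *trb)
	{
		/* Link TRBs carry TRB_TYPE(TRB_LINK) in link.control */
		return TRB_TYPE_LINK_LE32(trb->link.control);
	}

	static inline bool link_trb_toggles_cycle(union xhci_trb *trb)
	{
		/* Only the last link TRB in the ring has LINK_TOGGLE set */
		return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
	}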
@@ -223,8 +262,6 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
bool more_trbs_coming)
{
u32 chain;
- union xhci_trb *next;
- unsigned int link_trb_count = 0;
chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
@@ -233,48 +270,67 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
return;
}
- next = ++(ring->enqueue);
+ ring->enqueue++;
- /* Update the dequeue pointer further if that was a link TRB */
- while (trb_is_link(next)) {
+ /*
+ * If we are in the middle of a TD or the caller plans to enqueue more
+ * TDs as one transfer (e.g. control), traverse any link TRBs right now.
+ * Otherwise, enqueue can stay on a link until the next prepare_ring().
+ * This avoids enqueue entering deq_seg and simplifies ring expansion.
+ */
+ if (trb_is_link(ring->enqueue) && (chain || more_trbs_coming))
+ inc_enq_past_link(xhci, ring, chain);
+}
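
After this refactor the link-traversal logic has exactly two callers with different chain policies; schematically (both calls appear in this patch):

	/* inc_enq(): mid-TD, or caller will enqueue more TDs - keep chain bit */
	inc_enq_past_link(xhci, ring, chain);

	/* prepare_ring(): handing a deferred link back to HW - chain cleared */
	inc_enq_past_link(xhci, ep_ring, 0);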
- /*
- * If the caller doesn't plan on enqueueing more TDs before
- * ringing the doorbell, then we don't want to give the link TRB
- * to the hardware just yet. We'll give the link TRB back in
- * prepare_ring() just before we enqueue the TD at the top of
- * the ring.
- */
- if (!chain && !more_trbs_coming)
- break;
+/*
+ * If the suspect DMA address is a TRB in this TD, this function returns that
+ * TRB's segment. Otherwise it returns NULL.
+ */
+static struct xhci_segment *trb_in_td(struct xhci_td *td, dma_addr_t suspect_dma)
+{
+ dma_addr_t start_dma;
+ dma_addr_t end_seg_dma;
+ dma_addr_t end_trb_dma;
+ struct xhci_segment *cur_seg;
- /* If we're not dealing with 0.95 hardware or isoc rings on
- * AMD 0.96 host, carry over the chain bit of the previous TRB
- * (which may mean the chain bit is cleared).
- */
- if (!xhci_link_chain_quirk(xhci, ring->type)) {
- next->link.control &= cpu_to_le32(~TRB_CHAIN);
- next->link.control |= cpu_to_le32(chain);
- }
- /* Give this link TRB to the hardware */
- wmb();
- next->link.control ^= cpu_to_le32(TRB_CYCLE);
+ start_dma = xhci_trb_virt_to_dma(td->start_seg, td->start_trb);
+ cur_seg = td->start_seg;
- /* Toggle the cycle bit after the last ring segment. */
- if (link_trb_toggles_cycle(next))
- ring->cycle_state ^= 1;
+ do {
+ if (start_dma == 0)
+ return NULL;
+ /* We may get an event for a Link TRB in the middle of a TD */
+ end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
+ &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
+ /* If the end TRB isn't in this segment, this is set to 0 */
+ end_trb_dma = xhci_trb_virt_to_dma(cur_seg, td->end_trb);
- ring->enq_seg = ring->enq_seg->next;
- ring->enqueue = ring->enq_seg->trbs;
- next = ring->enqueue;
+ if (end_trb_dma > 0) {
+ /* The end TRB is in this segment, so suspect should be here */
+ if (start_dma <= end_trb_dma) {
+ if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
+ return cur_seg;
+ } else {
+ /* Case for one segment with
+ * a TD wrapped around to the top
+ */
+ if ((suspect_dma >= start_dma &&
+ suspect_dma <= end_seg_dma) ||
+ (suspect_dma >= cur_seg->dma &&
+ suspect_dma <= end_trb_dma))
+ return cur_seg;
+ }
+ return NULL;
+ }
+ /* Might still be somewhere in this segment */
+ if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
+ return cur_seg;
- trace_xhci_inc_enq(ring);
+ cur_seg = cur_seg->next;
+ start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
+ } while (cur_seg != td->start_seg);
- if (link_trb_count++ > ring->num_segs) {
- xhci_warn(xhci, "%s: Ring link TRB loop\n", __func__);
- break;
- }
- }
+ return NULL;
}
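
The wrapped-TD branch above is the subtle case: a TD may start near the end of a segment and finish back at its top. A stand-alone model of the range check with hypothetical addresses (plain C, independent of kernel headers):

	#include <stdbool.h>
	#include <stdint.h>

	typedef uint64_t dma_addr_t;

	/* Models trb_in_td()'s per-segment test when the end TRB is here. */
	static bool dma_in_td_range(dma_addr_t seg_start, dma_addr_t seg_end,
				    dma_addr_t td_start, dma_addr_t td_end,
				    dma_addr_t suspect)
	{
		if (td_start <= td_end)
			/* TD occupies one contiguous run of TRBs */
			return suspect >= td_start && suspect <= td_end;
		/* TD wrapped: tail of the segment plus its head */
		return (suspect >= td_start && suspect <= seg_end) ||
		       (suspect >= seg_start && suspect <= td_end);
	}

With a segment spanning 0x1000-0x1ff0 and a wrapped TD from 0x1f00 to 0x1040, suspect addresses 0x1f80 and 0x1020 match while 0x1800 does not.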
/*
@@ -505,8 +561,8 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
* pointer command pending because the device can choose to start any
* stream once the endpoint is on the HW schedule.
*/
- if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) ||
- (ep_state & EP_HALTED) || (ep_state & EP_CLEARING_TT))
+ if (ep_state & (EP_STOP_CMD_PENDING | SET_DEQ_PENDING | EP_HALTED |
+ EP_CLEARING_TT | EP_STALLED))
return;
trace_xhci_ring_ep_doorbell(slot_id, DB_VALUE(ep_index, stream_id));
@@ -1014,7 +1070,7 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
td->urb->stream_id);
hw_deq &= ~0xf;
- if (td->cancel_status == TD_HALTED || trb_in_td(xhci, td, hw_deq, false)) {
+ if (td->cancel_status == TD_HALTED || trb_in_td(td, hw_deq)) {
switch (td->cancel_status) {
case TD_CLEARED: /* TD is already no-op */
case TD_CLEARING_CACHE: /* set TR deq command already queued */
@@ -1104,7 +1160,7 @@ static struct xhci_td *find_halted_td(struct xhci_virt_ep *ep)
hw_deq = xhci_get_hw_deq(ep->xhci, ep->vdev, ep->ep_index, 0);
hw_deq &= ~0xf;
td = list_first_entry(&ep->ring->td_list, struct xhci_td, td_list);
- if (trb_in_td(ep->xhci, td, hw_deq, false))
+ if (trb_in_td(td, hw_deq))
return td;
}
return NULL;
@@ -1164,7 +1220,14 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
*/
switch (GET_EP_CTX_STATE(ep_ctx)) {
case EP_STATE_HALTED:
- xhci_dbg(xhci, "Stop ep completion raced with stall, reset ep\n");
+ xhci_dbg(xhci, "Stop ep completion raced with stall\n");
+ /*
+ * If the halt happened before Stop Endpoint failed, its transfer event
+ * should have already been handled and Reset Endpoint should be pending.
+ */
+ if (ep->ep_state & EP_HALTED)
+ goto reset_done;
+
if (ep->ep_state & EP_HAS_STREAMS) {
reset_type = EP_SOFT_RESET;
} else {
@@ -1175,8 +1238,11 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
}
/* reset ep, reset handler cleans up cancelled tds */
err = xhci_handle_halted_endpoint(xhci, ep, td, reset_type);
+ xhci_dbg(xhci, "Stop ep completion resetting ep, status %d\n", err);
if (err)
break;
+reset_done:
+ /* Reset EP handler will clean up cancelled TDs */
ep->ep_state &= ~EP_STOP_CMD_PENDING;
return;
case EP_STATE_STOPPED:
@@ -1198,16 +1264,19 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
* Stopped state, but it will soon change to Running.
*
* Assume this bug on unexpected Stop Endpoint failures.
- * Keep retrying until the EP starts and stops again, on
- * chips where this is known to help. Wait for 100ms.
+ * Keep retrying until the EP starts and stops again.
*/
- if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100)))
- break;
fallthrough;
case EP_STATE_RUNNING:
/* Race, HW handled stop ep cmd before ep was running */
xhci_dbg(xhci, "Stop ep completion ctx error, ctx_state %d\n",
GET_EP_CTX_STATE(ep_ctx));
+ /*
+ * Don't retry forever if we guessed wrong or a defective HC never starts
+ * the EP or says 'Running' but fails the command. We must give back TDs.
+ */
+ if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100)))
+ break;
command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
if (!command) {
@@ -1332,43 +1401,6 @@ void xhci_hc_died(struct xhci_hcd *xhci)
usb_hc_died(xhci_to_hcd(xhci));
}
-static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
- struct xhci_virt_device *dev,
- struct xhci_ring *ep_ring,
- unsigned int ep_index)
-{
- union xhci_trb *dequeue_temp;
-
- dequeue_temp = ep_ring->dequeue;
-
- /* If we get two back-to-back stalls, and the first stalled transfer
- * ends just before a link TRB, the dequeue pointer will be left on
- * the link TRB by the code in the while loop. So we have to update
- * the dequeue pointer one segment further, or we'll jump off
- * the segment into la-la-land.
- */
- if (trb_is_link(ep_ring->dequeue)) {
- ep_ring->deq_seg = ep_ring->deq_seg->next;
- ep_ring->dequeue = ep_ring->deq_seg->trbs;
- }
-
- while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
- /* We have more usable TRBs */
- ep_ring->dequeue++;
- if (trb_is_link(ep_ring->dequeue)) {
- if (ep_ring->dequeue ==
- dev->eps[ep_index].queued_deq_ptr)
- break;
- ep_ring->deq_seg = ep_ring->deq_seg->next;
- ep_ring->dequeue = ep_ring->deq_seg->trbs;
- }
- if (ep_ring->dequeue == dequeue_temp) {
- xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
- break;
- }
- }
-}
-
/*
* When we get a completion for a Set Transfer Ring Dequeue Pointer command,
* we need to clear the set deq pending flag in the endpoint ring state, so that
@@ -1473,8 +1505,8 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
/* Update the ring's dequeue segment and dequeue pointer
* to reflect the new position.
*/
- update_ring_for_set_deq_completion(xhci, ep->vdev,
- ep_ring, ep_index);
+ ep_ring->deq_seg = ep->queued_deq_seg;
+ ep_ring->dequeue = ep->queued_deq_ptr;
} else {
xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
@@ -2116,67 +2148,6 @@ cleanup:
spin_lock(&xhci->lock);
}
-/*
- * If the suspect DMA address is a TRB in this TD, this function returns that
- * TRB's segment. Otherwise it returns 0.
- */
-struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, struct xhci_td *td, dma_addr_t suspect_dma,
- bool debug)
-{
- dma_addr_t start_dma;
- dma_addr_t end_seg_dma;
- dma_addr_t end_trb_dma;
- struct xhci_segment *cur_seg;
-
- start_dma = xhci_trb_virt_to_dma(td->start_seg, td->start_trb);
- cur_seg = td->start_seg;
-
- do {
- if (start_dma == 0)
- return NULL;
- /* We may get an event for a Link TRB in the middle of a TD */
- end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
- &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
- /* If the end TRB isn't in this segment, this is set to 0 */
- end_trb_dma = xhci_trb_virt_to_dma(cur_seg, td->end_trb);
-
- if (debug)
- xhci_warn(xhci,
- "Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
- (unsigned long long)suspect_dma,
- (unsigned long long)start_dma,
- (unsigned long long)end_trb_dma,
- (unsigned long long)cur_seg->dma,
- (unsigned long long)end_seg_dma);
-
- if (end_trb_dma > 0) {
- /* The end TRB is in this segment, so suspect should be here */
- if (start_dma <= end_trb_dma) {
- if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
- return cur_seg;
- } else {
- /* Case for one segment with
- * a TD wrapped around to the top
- */
- if ((suspect_dma >= start_dma &&
- suspect_dma <= end_seg_dma) ||
- (suspect_dma >= cur_seg->dma &&
- suspect_dma <= end_trb_dma))
- return cur_seg;
- }
- return NULL;
- } else {
- /* Might still be somewhere in this segment */
- if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
- return cur_seg;
- }
- cur_seg = cur_seg->next;
- start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
- } while (cur_seg != td->start_seg);
-
- return NULL;
-}
-
static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td,
struct xhci_virt_ep *ep)
{
@@ -2476,6 +2447,12 @@ static void process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
if (ep_trb != td->end_trb)
td->error_mid_td = true;
break;
+ case COMP_MISSED_SERVICE_ERROR:
+ frame->status = -EXDEV;
+ sum_trbs_for_length = true;
+ if (ep_trb != td->end_trb)
+ td->error_mid_td = true;
+ break;
case COMP_INCOMPATIBLE_DEVICE_ERROR:
case COMP_STALL_ERROR:
frame->status = -EPROTO;
@@ -2596,6 +2573,9 @@ static void process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
xhci_handle_halted_endpoint(xhci, ep, td, EP_SOFT_RESET);
return;
+ case COMP_STALL_ERROR:
+ ep->ep_state |= EP_STALLED;
+ break;
default:
/* do nothing */
break;
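
Setting EP_STALLED on a stall ties into the doorbell gate changed earlier in this patch: while the flag is set, the doorbell is never rung, so no new TRBs are handed to a stalled endpoint. The consumer side, repeated from the xhci_ring_ep_doorbell() hunk above:

	if (ep_state & (EP_STOP_CMD_PENDING | SET_DEQ_PENDING | EP_HALTED |
			EP_CLEARING_TT | EP_STALLED))
		return;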
@@ -2644,6 +2624,22 @@ static int handle_transferless_tx_event(struct xhci_hcd *xhci, struct xhci_virt_
return 0;
}
+static bool xhci_spurious_success_tx_event(struct xhci_hcd *xhci,
+ struct xhci_ring *ring)
+{
+ switch (ring->old_trb_comp_code) {
+ case COMP_SHORT_PACKET:
+ return xhci->quirks & XHCI_SPURIOUS_SUCCESS;
+ case COMP_USB_TRANSACTION_ERROR:
+ case COMP_BABBLE_DETECTED_ERROR:
+ case COMP_ISOCH_BUFFER_OVERRUN:
+ return xhci->quirks & XHCI_ETRON_HOST &&
+ ring->type == TYPE_ISOC;
+ default:
+ return false;
+ }
+}
+
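
The predicate keys off ring->old_trb_comp_code, new per-ring state that this series uses in place of the old last_td_was_short flag. An abbreviated sketch of what the xhci.h side presumably adds (struct heavily elided, not part of this file's diff):

	struct xhci_ring {
		/* ... */
		u32	old_trb_comp_code; /* completion code of last event on this ring */
	};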
/*
* If this function returns an error condition, it means it got a Transfer
* event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
@@ -2664,6 +2660,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
int status = -EINPROGRESS;
struct xhci_ep_ctx *ep_ctx;
u32 trb_comp_code;
+ bool ring_xrun_event = false;
slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
@@ -2697,8 +2694,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
case COMP_SUCCESS:
if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
trb_comp_code = COMP_SHORT_PACKET;
- xhci_dbg(xhci, "Successful completion on short TX for slot %u ep %u with last td short %d\n",
- slot_id, ep_index, ep_ring->last_td_was_short);
+ xhci_dbg(xhci, "Successful completion on short TX for slot %u ep %u with last td comp code %d\n",
+ slot_id, ep_index, ep_ring->old_trb_comp_code);
}
break;
case COMP_SHORT_PACKET:
@@ -2770,14 +2767,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
* Underrun Event for OUT Isoch endpoint.
*/
xhci_dbg(xhci, "Underrun event on slot %u ep %u\n", slot_id, ep_index);
- if (ep->skip)
- break;
- return 0;
+ ring_xrun_event = true;
+ break;
case COMP_RING_OVERRUN:
xhci_dbg(xhci, "Overrun event on slot %u ep %u\n", slot_id, ep_index);
- if (ep->skip)
- break;
- return 0;
+ ring_xrun_event = true;
+ break;
case COMP_MISSED_SERVICE_ERROR:
/*
* When encounter missed service error, one or more isoc tds
@@ -2787,9 +2782,9 @@ static int handle_tx_event(struct xhci_hcd *xhci,
*/
ep->skip = true;
xhci_dbg(xhci,
- "Miss service interval error for slot %u ep %u, set skip flag\n",
- slot_id, ep_index);
- return 0;
+ "Miss service interval error for slot %u ep %u, set skip flag%s\n",
+ slot_id, ep_index, ep_trb_dma ? ", skip now" : "");
+ break;
case COMP_NO_PING_RESPONSE_ERROR:
ep->skip = true;
xhci_dbg(xhci,
@@ -2832,11 +2827,15 @@ static int handle_tx_event(struct xhci_hcd *xhci,
*/
td = list_first_entry_or_null(&ep_ring->td_list, struct xhci_td, td_list);
- if (td && td->error_mid_td && !trb_in_td(xhci, td, ep_trb_dma, false)) {
+ if (td && td->error_mid_td && !trb_in_td(td, ep_trb_dma)) {
xhci_dbg(xhci, "Missing TD completion event after mid TD error\n");
xhci_dequeue_td(xhci, td, ep_ring, td->status);
}
+ /* If the TRB pointer is NULL, missed TDs will be skipped on the next event */
+ if (trb_comp_code == COMP_MISSED_SERVICE_ERROR && !ep_trb_dma)
+ return 0;
+
if (list_empty(&ep_ring->td_list)) {
/*
* Don't print warnings if ring is empty due to a stopped endpoint generating an
@@ -2846,7 +2845,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
*/
if (trb_comp_code != COMP_STOPPED &&
trb_comp_code != COMP_STOPPED_LENGTH_INVALID &&
- !ep_ring->last_td_was_short) {
+ !ring_xrun_event &&
+ !xhci_spurious_success_tx_event(xhci, ep_ring)) {
xhci_warn(xhci, "Event TRB for slot %u ep %u with no TDs queued\n",
slot_id, ep_index);
}
@@ -2860,14 +2860,31 @@ static int handle_tx_event(struct xhci_hcd *xhci,
td_list);
/* Is this a TRB in the currently executing TD? */
- ep_seg = trb_in_td(xhci, td, ep_trb_dma, false);
+ ep_seg = trb_in_td(td, ep_trb_dma);
if (!ep_seg) {
if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
+ /* this event is unlikely to match any TD, don't skip them all */
+ if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID)
+ return 0;
+
skip_isoc_td(xhci, td, ep, status);
- if (!list_empty(&ep_ring->td_list))
+
+ if (!list_empty(&ep_ring->td_list)) {
+ if (ring_xrun_event) {
+ /*
+ * If we are here, we are on xHCI 1.0 host with no
+ * idea how many TDs were missed or where the xrun
+ * occurred. New TDs may have been added after the
+ * xrun, so skip only one TD to be safe.
+ */
+ xhci_dbg(xhci, "Skipped one TD for slot %u ep %u",
+ slot_id, ep_index);
+ return 0;
+ }
continue;
+ }
xhci_dbg(xhci, "All TDs skipped for slot %u ep %u. Clear skip flag.\n",
slot_id, ep_index);
@@ -2876,6 +2893,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
goto check_endpoint_halted;
}
+ /* TD was queued after xrun, maybe xrun was on a link, don't panic yet */
+ if (ring_xrun_event)
+ return 0;
+
/*
* Skip the Force Stopped Event. The 'ep_trb' of FSE is not in the current
* TD pointed to by 'ep_ring->dequeue' because the hardware dequeue
@@ -2890,21 +2911,17 @@ static int handle_tx_event(struct xhci_hcd *xhci,
/*
* Some hosts give a spurious success event after a short
- * transfer. Ignore it.
+ * transfer or error on last TRB. Ignore it.
*/
- if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
- ep_ring->last_td_was_short) {
- ep_ring->last_td_was_short = false;
+ if (xhci_spurious_success_tx_event(xhci, ep_ring)) {
+ xhci_dbg(xhci, "Spurious event dma %pad, comp_code %u after %u\n",
+ &ep_trb_dma, trb_comp_code, ep_ring->old_trb_comp_code);
+ ep_ring->old_trb_comp_code = trb_comp_code;
return 0;
}
/* HC is busted, give up! */
- xhci_err(xhci,
- "ERROR Transfer event TRB DMA ptr not part of current TD ep_index %d comp_code %u\n",
- ep_index, trb_comp_code);
- trb_in_td(xhci, td, ep_trb_dma, true);
-
- return -ESHUTDOWN;
+ goto debug_finding_td;
}
if (ep->skip) {
@@ -2922,10 +2939,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
*/
} while (ep->skip);
- if (trb_comp_code == COMP_SHORT_PACKET)
- ep_ring->last_td_was_short = true;
- else
- ep_ring->last_td_was_short = false;
+ ep_ring->old_trb_comp_code = trb_comp_code;
+
+ /* Get out if a TD was queued at enqueue after the xrun occurred */
+ if (ring_xrun_event)
+ return 0;
ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) / sizeof(*ep_trb)];
trace_xhci_handle_transfer(ep_ring, (struct xhci_generic_trb *) ep_trb, ep_trb_dma);
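
The ep_trb lookup above recovers the TRB from the event's DMA pointer with plain offset arithmetic; a stand-alone illustration using hypothetical numbers:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t seg_dma = 0x1000;	/* segment base (hypothetical) */
		uint64_t ep_trb_dma = 0x10a0;	/* TRB pointer from the event */
		uint64_t trb_size = 16;		/* one TRB is 16 bytes */

		/* Same arithmetic as the ep_trb index above: prints 10 */
		printf("TRB index in segment: %llu\n",
		       (unsigned long long)((ep_trb_dma - seg_dma) / trb_size));
		return 0;
	}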
@@ -2957,6 +2975,17 @@ check_endpoint_halted:
return 0;
+debug_finding_td:
+ xhci_err(xhci, "Event dma %pad for ep %d status %d not part of TD at %016llx - %016llx\n",
+ &ep_trb_dma, ep_index, trb_comp_code,
+ (unsigned long long)xhci_trb_virt_to_dma(td->start_seg, td->start_trb),
+ (unsigned long long)xhci_trb_virt_to_dma(td->end_seg, td->end_trb));
+
+ xhci_for_each_ring_seg(ep_ring->first_seg, ep_seg)
+ xhci_warn(xhci, "Ring seg %u dma %pad\n", ep_seg->num, &ep_seg->dma);
+
+ return -ESHUTDOWN;
+
err_out:
xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
(unsigned long long) xhci_trb_virt_to_dma(
@@ -3216,7 +3245,6 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
- unsigned int link_trb_count = 0;
unsigned int new_segs = 0;
/* Make sure the endpoint has been added to xHC schedule */
@@ -3264,33 +3292,9 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
}
}
- while (trb_is_link(ep_ring->enqueue)) {
- /* If we're not dealing with 0.95 hardware or isoc rings
- * on AMD 0.96 host, clear the chain bit.
- */
- if (!xhci_link_chain_quirk(xhci, ep_ring->type))
- ep_ring->enqueue->link.control &=
- cpu_to_le32(~TRB_CHAIN);
- else
- ep_ring->enqueue->link.control |=
- cpu_to_le32(TRB_CHAIN);
-
- wmb();
- ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
-
- /* Toggle the cycle bit after the last ring segment. */
- if (link_trb_toggles_cycle(ep_ring->enqueue))
- ep_ring->cycle_state ^= 1;
-
- ep_ring->enq_seg = ep_ring->enq_seg->next;
- ep_ring->enqueue = ep_ring->enq_seg->trbs;
-
- /* prevent infinite loop if all first trbs are link trbs */
- if (link_trb_count++ > ep_ring->num_segs) {
- xhci_warn(xhci, "Ring is an endless link TRB loop\n");
- return -EINVAL;
- }
- }
+ /* Ensure that new TRBs won't overwrite a link */
+ if (trb_is_link(ep_ring->enqueue))
+ inc_enq_past_link(xhci, ep_ring, 0);
if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue)) {
xhci_warn(xhci, "Missing link TRB at end of ring segment\n");