There seems to be no need to keep invalidated TDs around and require callers to follow up with xhci_giveback_invalidated_tds(), so make xhci_invalidate_cancelled_tds() give back invalidated TDs right away. TD_CLEARED cancel status is no longer useful and therefore removed. Code complexity and overhead of repeated list traversals are reduced. There is no more need to debug interactions between these functions, so a big source of xhci_dbg() noise (and potential bugs) goes away. From now, "Removing cancelled TD starting at ..." really means that the TD is being removed, unless one of the errors below is logged or dynamic debug indicates that Set TR Dequeue is queued or deferred. Also clean up some debug noise from xhci_handle_cmd_set_deq(), which practically duplicates xhci_invalidate_cancelled_tds() messages that will be printed just before this completion handler returns. Signed-off-by: Michal Pecio <michal.pecio@xxxxxxxxx> --- drivers/usb/host/xhci-ring.c | 49 +++++++----------------------------- drivers/usb/host/xhci.h | 1 - 2 files changed, 9 insertions(+), 41 deletions(-) diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 12087f2f9db7..fd0f8a9196e2 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -927,30 +927,6 @@ static void xhci_dequeue_td(struct xhci_hcd *xhci, struct xhci_td *td, struct xh xhci_td_cleanup(xhci, td, ring, status); } -/* Complete the cancelled URBs we unlinked from td_list. 
*/ -static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep) -{ - struct xhci_ring *ring; - struct xhci_td *td, *tmp_td; - - list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, - cancelled_td_list) { - - ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb); - - if (td->cancel_status == TD_CLEARED) { - xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n", - __func__, td->urb); - xhci_td_cleanup(ep->xhci, td, ring, td->status); - } else { - xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n", - __func__, td->urb, td->cancel_status); - } - if (ep->xhci->xhc_state & XHCI_STATE_DYING) - return; - } -} - static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, enum xhci_ep_reset_type reset_type) { @@ -1031,7 +1007,7 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep) struct xhci_td *td = NULL; struct xhci_td *tmp_td = NULL; struct xhci_td *cached_td = NULL; - struct xhci_ring *ring; + struct xhci_ring *ring, *cached_ring = NULL; u64 hw_deq; unsigned int slot_id = ep->vdev->slot_id; int err; @@ -1070,7 +1046,6 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep) if (td->cancel_status == TD_HALTED || trb_in_td(td, hw_deq)) { switch (td->cancel_status) { - case TD_CLEARED: /* TD is already no-op */ case TD_CLEARING_CACHE: /* set TR deq command already queued */ break; case TD_DIRTY: /* TD is cached, clear it */ @@ -1092,16 +1067,18 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep) td->urb, cached_td->urb, td->urb->stream_id); td_to_noop(cached_td, false); - cached_td->cancel_status = TD_CLEARED; + xhci_td_cleanup(xhci, cached_td, cached_ring, + cached_td->status); } td_to_noop(td, false); td->cancel_status = TD_CLEARING_CACHE; + cached_ring = ring; cached_td = td; break; } } else { td_to_noop(td, false); - td->cancel_status = TD_CLEARED; + xhci_td_cleanup(xhci, td, ring, td->status); } } @@ -1123,10 +1100,12 @@ static int 
xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep) if (td->cancel_status != TD_CLEARING_CACHE && td->cancel_status != TD_CLEARING_CACHE_DEFERRED) continue; - xhci_warn(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n", + xhci_warn(xhci, "Failed to clear cancelled cached URB %p, give back anyway\n", td->urb); td_to_noop(td, false); - td->cancel_status = TD_CLEARED; + ring = xhci_urb_to_transfer_ring(xhci, td->urb); + xhci_td_cleanup(xhci, td, ring, td->status); + } } return 0; @@ -1142,7 +1121,6 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep) void xhci_process_cancelled_tds(struct xhci_virt_ep *ep) { xhci_invalidate_cancelled_tds(ep); - xhci_giveback_invalidated_tds(ep); } /* @@ -1295,7 +1273,6 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, ep->ep_state &= ~EP_STOP_CMD_PENDING; /* Otherwise ring the doorbell(s) to restart queued transfers */ - xhci_giveback_invalidated_tds(ep); ring_doorbell_for_active_rings(xhci, slot_id, ep_index); } @@ -1517,13 +1494,9 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, cancelled_td_list) { ep_ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb); if (td->cancel_status == TD_CLEARING_CACHE) { - td->cancel_status = TD_CLEARED; xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n", __func__, td->urb); xhci_td_cleanup(ep->xhci, td, ep_ring, td->status); - } else { - xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n", - __func__, td->urb, td->cancel_status); } } cleanup: @@ -1538,8 +1511,6 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, xhci_invalidate_cancelled_tds(ep); /* Try to restart the endpoint if all is done */ ring_doorbell_for_active_rings(xhci, slot_id, ep_index); - /* Start giving back any TDs invalidated above */ - xhci_giveback_invalidated_tds(ep); } else { /* Restart any rings with pending URBs */ xhci_dbg(ep->xhci, "%s: All TDs cleared, ring doorbell\n", __func__); @@ 
-1574,8 +1545,6 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, /* Clear our internal halted state */ ep->ep_state &= ~EP_HALTED; - xhci_giveback_invalidated_tds(ep); - /* if this was a soft reset, then restart */ if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP) ring_doorbell_for_active_rings(xhci, slot_id, ep_index); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index e87ffb284ab2..94394db271bf 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1297,7 +1297,6 @@ enum xhci_cancelled_td_status { TD_HALTED, TD_CLEARING_CACHE, TD_CLEARING_CACHE_DEFERRED, - TD_CLEARED, }; struct xhci_td { -- 2.48.1