// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue. This means there will always be at
 *    least one free TRB in the ring. This is useful if you want to turn that
 *    into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB. If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules). You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer. If SW is producer, it rings the doorbell for command
 *    and endpoint rings. If HC is the producer for the event ring,
 *    it generates an interrupt according to interrupt modulation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer. SW is the consumer for the event ring, and it
 *    updates event ring dequeue pointer. HC is the consumer for the command and
 *    endpoint rings; it generates events on the event ring for these.
 */
54
Sarah Sharp8a96c052009-04-27 19:59:19 -070055#include <linux/scatterlist.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090056#include <linux/slab.h>
Mathias Nymanf9c589e2016-06-21 10:58:02 +030057#include <linux/dma-mapping.h>
Sarah Sharp7f84eef2009-04-27 19:53:56 -070058#include "xhci.h"
Xenia Ragiadakou3a7fa5b2013-07-31 07:35:27 +030059#include "xhci-trace.h"
Chunfeng Yun0cbd4b32015-11-24 13:09:55 +020060#include "xhci-mtk.h"
Sarah Sharp7f84eef2009-04-27 19:53:56 -070061
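/*
 * Illustrative sketch only (not used by the driver): how the ring rules
 * above read as code. Per ring behavior rule 1, a ring is empty when
 * enqueue == dequeue, and per consumer rule 1, a TRB belongs to the
 * consumer when its cycle bit matches the ring cycle state. The example_*
 * helper names are hypothetical, assuming the xhci_ring and xhci_trb
 * definitions from xhci.h.
 */
static inline bool example_ring_is_empty(struct xhci_ring *ring)
{
	/* Ring behavior rule 1: empty if enqueue == dequeue */
	return ring->enqueue == ring->dequeue;
}

static inline bool example_consumer_owns_trb(struct xhci_ring *ring,
					     union xhci_trb *trb)
{
	/* Consumer rule 1: cycle bit must equal our ring cycle state */
	return (le32_to_cpu(trb->generic.field[3]) & TRB_CYCLE) ==
		ring->cycle_state;
}
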
/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}
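
/*
 * Usage sketch (hypothetical helper): a zero return from
 * xhci_trb_virt_to_dma() means "not in this segment", so a match against a
 * hardware-reported DMA address can be written as below.
 */
static inline bool example_trb_matches_dma(struct xhci_segment *seg,
					   union xhci_trb *trb, dma_addr_t dma)
{
	dma_addr_t trb_dma = xhci_trb_virt_to_dma(seg, trb);

	return trb_dma && trb_dma == dma;
}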

static bool trb_is_noop(union xhci_trb *trb)
{
	return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
}

static bool trb_is_link(union xhci_trb *trb)
{
	return TRB_TYPE_LINK_LE32(trb->link.control);
}

static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
{
	return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
}

static bool last_trb_on_ring(struct xhci_ring *ring,
			struct xhci_segment *seg, union xhci_trb *trb)
{
	return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
}

static bool link_trb_toggles_cycle(union xhci_trb *trb)
{
	return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

static bool last_td_in_urb(struct xhci_td *td)
{
	struct urb_priv *urb_priv = td->urb->hcpriv;

	return urb_priv->num_tds_done == urb_priv->num_tds;
}

static void inc_td_cnt(struct urb *urb)
{
	struct urb_priv *urb_priv = urb->hcpriv;

	urb_priv->num_tds_done++;
}

static void trb_to_noop(union xhci_trb *trb, u32 noop_type)
{
	if (trb_is_link(trb)) {
		/* unchain chained link TRBs */
		trb->link.control &= cpu_to_le32(~TRB_CHAIN);
	} else {
		trb->generic.field[0] = 0;
		trb->generic.field[1] = 0;
		trb->generic.field[2] = 0;
		/* Preserve only the cycle bit of this TRB */
		trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
		trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
	}
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the next
 * TRB is in a new segment. This does not skip over link TRBs, and it does not
 * affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	if (trb_is_link(*trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
 */
void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	/* event ring doesn't have link trbs, check for last trb */
	if (ring->type == TYPE_EVENT) {
		if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
			ring->dequeue++;
			goto out;
		}
		if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
			ring->cycle_state ^= 1;
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		goto out;
	}

	/* All other rings have link trbs */
	if (!trb_is_link(ring->dequeue)) {
		ring->dequeue++;
		ring->num_trbs_free++;
	}
	while (trb_is_link(ring->dequeue)) {
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
	}

out:
	trace_xhci_inc_deq(ring);

	return;
}
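
/*
 * Illustrative sketch only (not used by the driver): the event ring
 * consumer loop implied by the consumer rules above. TRBs are processed
 * while they match our cycle state; inc_deq() handles segment wrap and
 * cycle toggling. The handle_one_event() call is a hypothetical
 * placeholder, and writing the new dequeue pointer to the hardware ERDP
 * register (consumer rule 3) is left out.
 */
static inline void example_consume_events(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring = xhci->event_ring;

	while ((le32_to_cpu(ring->dequeue->event_cmd.flags) & TRB_CYCLE) ==
	       ring->cycle_state) {
		/* handle_one_event(xhci, ring->dequeue); */
		inc_deq(xhci, ring);
	}
}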

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set. This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming: Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
			bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;

	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
	/* If this is not event ring, there is one less usable TRB */
	if (!trb_is_link(ring->enqueue))
		ring->num_trbs_free--;
	next = ++(ring->enqueue);

	/* Update the enqueue pointer further if that was a link TRB */
	while (trb_is_link(next)) {

		/*
		 * If the caller doesn't plan on enqueueing more TDs before
		 * ringing the doorbell, then we don't want to give the link TRB
		 * to the hardware just yet. We'll give the link TRB back in
		 * prepare_ring() just before we enqueue the TD at the top of
		 * the ring.
		 */
		if (!chain && !more_trbs_coming)
			break;

		/* If we're not dealing with 0.95 hardware or isoc rings on
		 * AMD 0.96 host, carry over the chain bit of the previous TRB
		 * (which may mean the chain bit is cleared).
		 */
		if (!(ring->type == TYPE_ISOC &&
		      (xhci->quirks & XHCI_AMD_0x96_HOST)) &&
		    !xhci_link_trb_quirk(xhci)) {
			next->link.control &= cpu_to_le32(~TRB_CHAIN);
			next->link.control |= cpu_to_le32(chain);
		}
		/* Give this link TRB to the hardware */
		wmb();
		next->link.control ^= cpu_to_le32(TRB_CYCLE);

		/* Toggle the cycle bit after the last ring segment. */
		if (link_trb_toggles_cycle(next))
			ring->cycle_state ^= 1;

		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}

	trace_xhci_inc_enq(ring);
}

/*
 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 * enqueue pointer will not advance into dequeue segment. See rules above.
 */
static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int num_trbs_in_deq_seg;

	if (ring->num_trbs_free < num_trbs)
		return 0;

	if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
		num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
		if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
			return 0;
	}

	return 1;
}
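
/*
 * Illustrative sketch only (not used by the driver): the producer pattern
 * from the rules above, built on room_on_ring() and inc_enq(). The
 * fill_trb() call is a hypothetical placeholder for writing the TRB
 * fields, with the cycle bit written last so the HC never sees a
 * half-written TRB.
 */
static inline int example_enqueue_one(struct xhci_hcd *xhci,
				      struct xhci_ring *ring)
{
	/* Producer rule 1: check for room first */
	if (!room_on_ring(xhci, ring, 1))
		return -ENOMEM;

	/* Producer rule 2: fill_trb(ring->enqueue, ring->cycle_state); */
	inc_enq(xhci, ring, false);

	/* Producer rule 3: the caller then rings the doorbell */
	return 0;
}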

/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
		return;

	xhci_dbg(xhci, "// Ding dong!\n");

	trace_xhci_ring_host_doorbell(0, DB_VALUE_HOST);

	writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	readl(&xhci->dba->doorbell[0]);
}

static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay)
{
	return mod_delayed_work(system_wq, &xhci->cmd_timer, delay);
}

static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
{
	return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command,
					cmd_list);
}

/*
 * Turn all commands on command ring with status set to "aborted" to no-op trbs.
 * If there are other commands waiting then restart the ring and kick the timer.
 * This must be called with command ring stopped and xhci->lock held.
 */
static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
					 struct xhci_command *cur_cmd)
{
	struct xhci_command *i_cmd;

	/* Turn all aborted commands in list to no-ops, then restart */
	list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) {

		if (i_cmd->status != COMP_COMMAND_ABORTED)
			continue;

		i_cmd->status = COMP_COMMAND_RING_STOPPED;

		xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
			 i_cmd->command_trb);

		trb_to_noop(i_cmd->command_trb, TRB_CMD_NOOP);

		/*
		 * caller waiting for completion is called when command
		 * completion event is received for these no-op commands
		 */
	}

	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	/* ring command ring doorbell to restart the command ring */
	if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
	    !(xhci->xhc_state & XHCI_STATE_DYING)) {
		xhci->current_cmd = cur_cmd;
		xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
		xhci_ring_cmd_db(xhci);
	}
}

/* Must be called with xhci->lock held, releases and re-acquires the lock */
static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
{
	u64 temp_64;
	int ret;

	xhci_dbg(xhci, "Abort command ring\n");

	reinit_completion(&xhci->cmd_ring_stop_completion);

	temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
			&xhci->op_regs->cmd_ring);

	/* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the
	 * completion of the Command Abort operation. If CRR is not negated in 5
	 * seconds then driver handles it as if host died (-ENODEV).
	 * In the future we should distinguish between -ENODEV and -ETIMEDOUT
	 * and try to recover a -ETIMEDOUT with a host controller reset.
	 */
	ret = xhci_handshake(&xhci->op_regs->cmd_ring,
			CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
	if (ret < 0) {
		xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret);
		xhci_halt(xhci);
		xhci_hc_died(xhci);
		return ret;
	}
	/*
	 * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
	 * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
	 * but the completion event is never sent. Wait 2 secs (arbitrary
	 * number) to handle those cases after negation of CMD_RING_RUNNING.
	 */
	spin_unlock_irqrestore(&xhci->lock, flags);
	ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion,
					  msecs_to_jiffies(2000));
	spin_lock_irqsave(&xhci->lock, flags);
	if (!ret) {
		xhci_dbg(xhci, "No stop event for abort, ring start fail?\n");
		xhci_cleanup_command_queue(xhci);
	} else {
		xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci));
	}
	return 0;
}

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index,
		unsigned int stream_id)
{
	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	unsigned int ep_state = ep->ep_state;

	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 * We don't want to restart any stream rings if there's a set dequeue
	 * pointer command pending because the device can choose to start any
	 * stream once the endpoint is on the HW schedule.
	 */
	if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) ||
	    (ep_state & EP_HALTED) || (ep_state & EP_CLEARING_TT))
		return;

	trace_xhci_ring_ep_doorbell(slot_id, DB_VALUE(ep_index, stream_id));

	writel(DB_VALUE(ep_index, stream_id), db_addr);
	/* The CPU has better things to do at this point than wait for a
	 * write-posting flush. It'll get there soon enough.
	 */
}

/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* A ring has pending URBs if its TD list is not empty */
	if (!(ep->ep_state & EP_HAS_STREAMS)) {
		if (ep->ring && !(list_empty(&ep->ring->td_list)))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
		return;
	}

	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
			stream_id++) {
		struct xhci_stream_info *stream_info = ep->stream_info;
		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
						stream_id);
	}
}

void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

/* Get the right ring for the given slot_id, ep_index and stream_id.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}
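
/*
 * Usage sketch (hypothetical helper): the triad comes straight from the
 * URB, which is how the driver's own xhci_urb_to_transfer_ring(), used
 * further down in this file, resolves a ring.
 */
static inline struct xhci_ring *example_urb_to_ring(struct xhci_hcd *xhci,
						    struct urb *urb)
{
	return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
			xhci_get_endpoint_index(&urb->ep->desc),
			urb->stream_id);
}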

/*
 * Get the hw dequeue pointer xHC stopped on, either directly from the
 * endpoint context, or if streams are in use from the stream context.
 * The returned hw_dequeue contains the lowest four bits with cycle state
 * and possible stream context type.
 */
static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev,
			   unsigned int ep_index, unsigned int stream_id)
{
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_stream_ctx *st_ctx;
	struct xhci_virt_ep *ep;

	ep = &vdev->eps[ep_index];

	if (ep->ep_state & EP_HAS_STREAMS) {
		st_ctx = &ep->stream_info->stream_ctx_array[stream_id];
		return le64_to_cpu(st_ctx->stream_ring);
	}
	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
	return le64_to_cpu(ep_ctx->deq);
}
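
/*
 * Usage sketch (hypothetical helper): hw_dequeue packs the cycle state
 * (and, for streams, the stream context type) into its low four bits, so
 * callers mask those off before comparing against TRB DMA addresses.
 */
static inline void example_decode_hw_deq(u64 hw_deq, dma_addr_t *dma,
					 unsigned int *cycle)
{
	*cycle = hw_deq & 0x1;			/* consumer cycle state */
	*dma = (dma_addr_t)(hw_deq & ~0xf);	/* 16-byte aligned TRB address */
}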

/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, stream id, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD. We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 *
 * Some of the uses of xhci_generic_trb are grotty, but if they're done
 * with correct __le32 accesses they should work fine. Only users of this are
 * in here.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *cur_td,
		struct xhci_dequeue_state *state)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_virt_ep *ep = &dev->eps[ep_index];
	struct xhci_ring *ep_ring;
	struct xhci_segment *new_seg;
	union xhci_trb *new_deq;
	dma_addr_t addr;
	u64 hw_dequeue;
	bool cycle_found = false;
	bool td_last_trb_found = false;

	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
			ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN can't find new dequeue state "
				"for invalid stream ID %u.\n",
				stream_id);
		return;
	}
	/*
	 * A cancelled TD can complete with a stall if HW cached the trb.
	 * In this case driver can't find cur_td, but if the ring is empty we
	 * can move the dequeue pointer to the current enqueue position.
	 */
	if (!cur_td) {
		if (list_empty(&ep_ring->td_list)) {
			state->new_deq_seg = ep_ring->enq_seg;
			state->new_deq_ptr = ep_ring->enqueue;
			state->new_cycle_state = ep_ring->cycle_state;
			goto done;
		} else {
			xhci_warn(xhci, "Can't find new dequeue state, missing cur_td\n");
			return;
		}
	}

	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Finding endpoint context");

	hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
	new_seg = ep_ring->deq_seg;
	new_deq = ep_ring->dequeue;
	state->new_cycle_state = hw_dequeue & 0x1;
	state->stream_id = stream_id;

	/*
	 * We want to find the pointer, segment and cycle state of the new trb
	 * (the one after current TD's last_trb). We know the cycle state at
	 * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
	 * found.
	 */
	do {
		if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
		    == (dma_addr_t)(hw_dequeue & ~0xf)) {
			cycle_found = true;
			if (td_last_trb_found)
				break;
		}
		if (new_deq == cur_td->last_trb)
			td_last_trb_found = true;

		if (cycle_found && trb_is_link(new_deq) &&
		    link_trb_toggles_cycle(new_deq))
			state->new_cycle_state ^= 0x1;

		next_trb(xhci, ep_ring, &new_seg, &new_deq);

		/* Search wrapped around, bail out */
		if (new_deq == ep->ring->dequeue) {
			xhci_err(xhci, "Error: Failed finding new dequeue state\n");
			state->new_deq_seg = NULL;
			state->new_deq_ptr = NULL;
			return;
		}

	} while (!cycle_found || !td_last_trb_found);

	state->new_deq_seg = new_seg;
	state->new_deq_ptr = new_deq;

done:
	/* Don't update the ring cycle state for the producer (us). */
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Cycle state = 0x%x", state->new_cycle_state);

	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"New dequeue segment = %p (virtual)",
			state->new_deq_seg);
	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"New dequeue pointer = 0x%llx (DMA)",
			(unsigned long long) addr);
}

/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
 */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		       struct xhci_td *td, bool flip_cycle)
{
	struct xhci_segment *seg = td->start_seg;
	union xhci_trb *trb = td->first_trb;

	while (1) {
		trb_to_noop(trb, TRB_TR_NOOP);

		/* flip cycle if asked to */
		if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
			trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);

		if (trb == td->last_trb)
			break;

		next_trb(xhci, ep_ring, &seg, &trb);
	}
}

static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	ep->ep_state &= ~EP_STOP_CMD_PENDING;
	/* Can't del_timer_sync in interrupt */
	del_timer(&ep->stop_cmd_timer);
}

/*
 * Must be called with xhci->lock held in interrupt context,
 * releases and re-acquires xhci->lock
 */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
				     struct xhci_td *cur_td, int status)
{
	struct urb *urb = cur_td->urb;
	struct urb_priv *urb_priv = urb->hcpriv;
	struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);

	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
		xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
		if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
			if (xhci->quirks & XHCI_AMD_PLL_FIX)
				usb_amd_quirk_pll_enable();
		}
	}
	xhci_urb_free_priv(urb_priv);
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	trace_xhci_urb_giveback(urb);
	usb_hcd_giveback_urb(hcd, urb, status);
}

static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
		struct xhci_ring *ring, struct xhci_td *td)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	struct xhci_segment *seg = td->bounce_seg;
	struct urb *urb = td->urb;
	size_t len;

	if (!ring || !seg || !urb)
		return;

	if (usb_urb_dir_out(urb)) {
		dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
				 DMA_TO_DEVICE);
		return;
	}

	dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
			 DMA_FROM_DEVICE);
	/* for in transfers we need to copy the data from bounce to sg */
	len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
				   seg->bounce_len, seg->bounce_offs);
	if (len != seg->bounce_len)
		xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
			  len, seg->bounce_len);
	seg->bounce_len = 0;
	seg->bounce_offs = 0;
}

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring. There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last TRB
 *     in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
 *     bit cleared) so that the HW will skip over them.
 */
static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, struct xhci_event_cmd *event)
{
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct xhci_td *cur_td = NULL;
	struct xhci_td *last_unlinked_td;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_virt_device *vdev;
	u64 hw_deq;
	struct xhci_dequeue_state deq_state;

	if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
		if (!xhci->devs[slot_id])
			xhci_warn(xhci, "Stop endpoint command "
				"completion for disabled slot %u\n",
				slot_id);
		return;
	}

	memset(&deq_state, 0, sizeof(deq_state));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));

	vdev = xhci->devs[slot_id];
	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
	trace_xhci_handle_cmd_stop_ep(ep_ctx);

	ep = &xhci->devs[slot_id]->eps[ep_index];
	last_unlinked_td = list_last_entry(&ep->cancelled_td_list,
			struct xhci_td, cancelled_td_list);

	if (list_empty(&ep->cancelled_td_list)) {
		xhci_stop_watchdog_timer_in_irq(xhci, ep);
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}

	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it. We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes.
	 */
	list_for_each_entry(cur_td, &ep->cancelled_td_list, cancelled_td_list) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Removing canceled TD starting at 0x%llx (dma).",
				(unsigned long long)xhci_trb_virt_to_dma(
					cur_td->start_seg, cur_td->first_trb));
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (!ep_ring) {
			/* This shouldn't happen unless a driver is mucking
			 * with the stream ID after submission. This will
			 * leave the TD on the hardware ring, and the hardware
			 * will try to execute it, and may access a buffer
			 * that has already been freed. In the best case, the
			 * hardware will execute it, and the event handler will
			 * ignore the completion event for that TD, since it was
			 * removed from the td_list for that endpoint. In
			 * short, don't muck with the stream ID after
			 * submission.
			 */
			xhci_warn(xhci, "WARN Cancelled URB %p "
					"has invalid stream ID %u.\n",
					cur_td->urb,
					cur_td->urb->stream_id);
			goto remove_finished_td;
		}
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
		hw_deq = xhci_get_hw_deq(xhci, vdev, ep_index,
					 cur_td->urb->stream_id);
		hw_deq &= ~0xf;

		if (trb_in_td(xhci, cur_td->start_seg, cur_td->first_trb,
			      cur_td->last_trb, hw_deq, false)) {
			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
						    cur_td->urb->stream_id,
						    cur_td, &deq_state);
		} else {
			td_to_noop(xhci, ep_ring, cur_td, false);
		}

remove_finished_td:
		/*
		 * The event handler won't see a completion for this TD anymore,
		 * so remove it from the endpoint ring's TD list. Keep it in
		 * the cancelled TD list for URB completion later.
		 */
		list_del_init(&cur_td->td_list);
	}

	xhci_stop_watchdog_timer_in_irq(xhci, ep);

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
					     &deq_state);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise ring the doorbell(s) to restart queued transfers */
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
	do {
		cur_td = list_first_entry(&ep->cancelled_td_list,
				struct xhci_td, cancelled_td_list);
		list_del_init(&cur_td->cancelled_td_list);

		/* Clean up the cancelled URB */
		/* Doesn't matter what we pass for status, since the core will
		 * just overwrite it (because the URB has been unlinked).
		 */
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td);
		inc_td_cnt(cur_td->urb);
		if (last_td_in_urb(cur_td))
			xhci_giveback_urb_in_irq(xhci, cur_td, 0);

		/* Stop processing the cancelled list if the watchdog timer is
		 * running.
		 */
		if (xhci->xhc_state & XHCI_STATE_DYING)
			return;
	} while (cur_td != last_unlinked_td);

	/* Return to the event handler with xhci->lock re-acquired */
}

static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_td *cur_td;
	struct xhci_td *tmp;

	list_for_each_entry_safe(cur_td, tmp, &ring->td_list, td_list) {
		list_del_init(&cur_td->td_list);

		if (!list_empty(&cur_td->cancelled_td_list))
			list_del_init(&cur_td->cancelled_td_list);

		xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);

		inc_td_cnt(cur_td->urb);
		if (last_td_in_urb(cur_td))
			xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
		int slot_id, int ep_index)
{
	struct xhci_td *cur_td;
	struct xhci_td *tmp;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ring;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & EP_HAS_STREAMS) ||
			(ep->ep_state & EP_GETTING_NO_STREAMS)) {
		int stream_id;

		for (stream_id = 1; stream_id < ep->stream_info->num_streams;
				stream_id++) {
			ring = ep->stream_info->stream_rings[stream_id];
			if (!ring)
				continue;

			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Killing URBs for slot ID %u, ep index %u, stream %u",
					slot_id, ep_index, stream_id);
			xhci_kill_ring_urbs(xhci, ring);
		}
	} else {
		ring = ep->ring;
		if (!ring)
			return;
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Killing URBs for slot ID %u, ep index %u",
				slot_id, ep_index);
		xhci_kill_ring_urbs(xhci, ring);
	}

	list_for_each_entry_safe(cur_td, tmp, &ep->cancelled_td_list,
			cancelled_td_list) {
		list_del_init(&cur_td->cancelled_td_list);
		inc_td_cnt(cur_td->urb);

		if (last_td_in_urb(cur_td))
			xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

/*
 * host controller died, register read returns 0xffffffff
 * Complete pending commands, mark them ABORTED.
 * URBs need to be given back as usb core might be waiting with device locks
 * held for the URBs to finish during device disconnect, blocking host remove.
 *
 * Call with xhci->lock held.
 * lock is released and re-acquired while giving back urb.
 */
void xhci_hc_died(struct xhci_hcd *xhci)
{
	int i, j;

	if (xhci->xhc_state & XHCI_STATE_DYING)
		return;

	xhci_err(xhci, "xHCI host controller not responding, assume dead\n");
	xhci->xhc_state |= XHCI_STATE_DYING;

	xhci_cleanup_command_queue(xhci);

	/* return any pending urbs, remove may be waiting for them */
	for (i = 0; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; j++)
			xhci_kill_endpoint_urbs(xhci, i, j);
	}

	/* inform usb core hc died if PCI remove isn't already handling it */
	if (!(xhci->xhc_state & XHCI_STATE_REMOVING))
		usb_hc_died(xhci_to_hcd(xhci));
}

/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead. The
 * host may still be completing some other events, so we have to be careful to
 * let the event ring handler and the URB dequeueing/enqueueing functions know
 * through xhci->state.
 *
 * The timer may also fire if the host takes a very long time to respond to the
 * command, and the stop endpoint command completion handler cannot delete the
 * timer before the timer function is called. Another endpoint cancellation may
 * sneak in before the timer function can grab the lock, and that may queue
 * another stop endpoint command and add the timer back. So we cannot use a
 * simple flag to say whether there is a pending stop endpoint command for a
 * particular endpoint.
 *
 * Instead we use a combination of that flag and checking if a new timer is
 * pending.
 */
void xhci_stop_endpoint_command_watchdog(struct timer_list *t)
{
	struct xhci_virt_ep *ep = from_timer(ep, t, stop_cmd_timer);
	struct xhci_hcd *xhci = ep->xhci;
	unsigned long flags;
	u32 usbsts;

	spin_lock_irqsave(&xhci->lock, flags);

	/* bail out if cmd completed but raced with stop ep watchdog timer.*/
	if (!(ep->ep_state & EP_STOP_CMD_PENDING) ||
	    timer_pending(&ep->stop_cmd_timer)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "Stop EP timer raced with cmd completion, exit");
		return;
	}
	usbsts = readl(&xhci->op_regs->status);

	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
	xhci_warn(xhci, "USBSTS:%s\n", xhci_decode_usbsts(usbsts));

	ep->ep_state &= ~EP_STOP_CMD_PENDING;

	xhci_halt(xhci);

	/*
	 * handle a stop endpoint cmd timeout as if host died (-ENODEV).
	 * In the future we could distinguish between -ENODEV and -ETIMEDOUT
	 * and try to recover a -ETIMEDOUT with a host controller reset
	 */
	xhci_hc_died(xhci);

	spin_unlock_irqrestore(&xhci->lock, flags);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"xHCI host controller is dead.");
}

static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_virt_device *dev,
		struct xhci_ring *ep_ring,
		unsigned int ep_index)
{
	union xhci_trb *dequeue_temp;
	int num_trbs_free_temp;
	bool revert = false;

	num_trbs_free_temp = ep_ring->num_trbs_free;
	dequeue_temp = ep_ring->dequeue;

	/* If we get two back-to-back stalls, and the first stalled transfer
	 * ends just before a link TRB, the dequeue pointer will be left on
	 * the link TRB by the code in the while loop. So we have to update
	 * the dequeue pointer one segment further, or we'll jump off
	 * the segment into la-la-land.
	 */
	if (trb_is_link(ep_ring->dequeue)) {
		ep_ring->deq_seg = ep_ring->deq_seg->next;
		ep_ring->dequeue = ep_ring->deq_seg->trbs;
	}

	while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
		/* We have more usable TRBs */
		ep_ring->num_trbs_free++;
		ep_ring->dequeue++;
		if (trb_is_link(ep_ring->dequeue)) {
			if (ep_ring->dequeue ==
					dev->eps[ep_index].queued_deq_ptr)
				break;
			ep_ring->deq_seg = ep_ring->deq_seg->next;
			ep_ring->dequeue = ep_ring->deq_seg->trbs;
		}
		if (ep_ring->dequeue == dequeue_temp) {
			revert = true;
			break;
		}
	}

	if (revert) {
		xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
		ep_ring->num_trbs_free = num_trbs_free_temp;
	}
}

/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again. We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, u32 cmd_comp_code)
{
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_ring *ep_ring;
	struct xhci_virt_device *dev;
	struct xhci_virt_ep *ep;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
	dev = xhci->devs[slot_id];
	ep = &dev->eps[ep_index];

	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
				stream_id);
		/* XXX: Harmless??? */
		goto cleanup;
	}

	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
	trace_xhci_handle_cmd_set_deq(slot_ctx);
	trace_xhci_handle_cmd_set_deq_ep(ep_ctx);

	if (cmd_comp_code != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		switch (cmd_comp_code) {
		case COMP_TRB_ERROR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
			break;
		case COMP_CONTEXT_STATE_ERROR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
			ep_state = GET_EP_CTX_STATE(ep_ctx);
			slot_state = le32_to_cpu(slot_ctx->dev_state);
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Slot state = %u, EP state = %u",
					slot_state, ep_state);
			break;
		case COMP_SLOT_NOT_ENABLED_ERROR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
					slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
					cmd_comp_code);
			break;
		}
		/* OK what do we do now? The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing, and endpoint state are correct. This might happen
		 * if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
	} else {
		u64 deq;
		/* 4.6.10 deq ptr is written to the stream ctx for streams */
		if (ep->ep_state & EP_HAS_STREAMS) {
			struct xhci_stream_ctx *ctx =
				&ep->stream_info->stream_ctx_array[stream_id];
			deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
		} else {
			deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
		if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
					 ep->queued_deq_ptr) == deq) {
			/* Update the ring's dequeue segment and dequeue pointer
			 * to reflect the new position.
			 */
			update_ring_for_set_deq_completion(xhci, dev,
				ep_ring, ep_index);
		} else {
			xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
			xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
				  ep->queued_deq_seg, ep->queued_deq_ptr);
		}
	}

cleanup:
	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
	dev->eps[ep_index].queued_deq_seg = NULL;
	dev->eps[ep_index].queued_deq_ptr = NULL;
	/* Restart any rings with pending URBs */
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
1155
static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, u32 cmd_comp_code)
{
	struct xhci_virt_device *vdev;
	struct xhci_ep_ctx *ep_ctx;
	unsigned int ep_index;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	vdev = xhci->devs[slot_id];
	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
	trace_xhci_handle_cmd_reset_ep(ep_ctx);

	/* This command will only fail if the endpoint wasn't halted,
	 * but we don't care.
	 */
	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
		"Ignoring reset ep completion code of %u", cmd_comp_code);

	/* HW with the reset endpoint quirk needs to have a configure endpoint
	 * command complete before the endpoint can be used.  Queue that here
	 * because the HW can't handle two commands being queued in a row.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
		struct xhci_command *command;

		command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
		if (!command)
			return;

		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			       "Queueing configure endpoint command");
		xhci_queue_configure_endpoint(xhci, command,
				xhci->devs[slot_id]->in_ctx->dma, slot_id,
				false);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Clear our internal halted state */
		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
	}

	/* if this was a soft reset, then restart */
	if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP)
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
		struct xhci_command *command, u32 cmd_comp_code)
{
	if (cmd_comp_code == COMP_SUCCESS)
		command->slot_id = slot_id;
	else
		command->slot_id = 0;
}

static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_slot_ctx *slot_ctx;

	virt_dev = xhci->devs[slot_id];
	if (!virt_dev)
		return;

	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	trace_xhci_handle_cmd_disable_slot(slot_ctx);

	if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
		/* Delete default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, true);
	xhci_free_virt_device(xhci, slot_id);
}

static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event, u32 cmd_comp_code)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	unsigned int ep_index;
	unsigned int ep_state;
	u32 add_flags, drop_flags;

	/*
	 * Configure endpoint commands can come from the USB core
	 * configuration or alt setting changes, or because the HW
	 * needed an extra configure endpoint command after a reset
	 * endpoint command or streams were being configured.
	 * If the command was for a halted endpoint, the xHCI driver
	 * is not waiting on the configure endpoint command.
	 */
	virt_dev = xhci->devs[slot_id];
	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "Could not get input context, bad type.\n");
		return;
	}

	add_flags = le32_to_cpu(ctrl_ctx->add_flags);
	drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
	/* Input ctx add_flags are the endpoint index plus one */
	ep_index = xhci_last_valid_endpoint(add_flags) - 1;

	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, ep_index);
	trace_xhci_handle_cmd_config_ep(ep_ctx);

	/* A usb_set_interface() call directly after clearing a halted
	 * condition may race on this quirky hardware.  Not worth
	 * worrying about, since this is prototype hardware.  Not sure
	 * if this will work for streams, but streams support was
	 * untested on this prototype.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
			ep_index != (unsigned int) -1 &&
			add_flags - SLOT_FLAG == drop_flags) {
		ep_state = virt_dev->eps[ep_index].ep_state;
		if (!(ep_state & EP_HALTED))
			return;
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Completed config ep cmd - "
				"last ep index = %d, state = %d",
				ep_index, ep_state);
		/* Clear internal halted state and restart ring(s) */
		virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}
	return;
}

static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *vdev;
	struct xhci_slot_ctx *slot_ctx;

	vdev = xhci->devs[slot_id];
	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
	trace_xhci_handle_cmd_addr_dev(slot_ctx);
}

static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event)
{
	struct xhci_virt_device *vdev;
	struct xhci_slot_ctx *slot_ctx;

	vdev = xhci->devs[slot_id];
	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
	trace_xhci_handle_cmd_reset_dev(slot_ctx);

	xhci_dbg(xhci, "Completed reset device command.\n");
	if (!xhci->devs[slot_id])
		xhci_warn(xhci, "Reset device command completion for disabled slot %u\n",
			  slot_id);
}

static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	if (!(xhci->quirks & XHCI_NEC_HOST)) {
		xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n");
		return;
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"NEC firmware version %2x.%02x",
			NEC_FW_MAJOR(le32_to_cpu(event->status)),
			NEC_FW_MINOR(le32_to_cpu(event->status)));
}

static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
{
	list_del(&cmd->cmd_list);

	if (cmd->completion) {
		cmd->status = status;
		complete(cmd->completion);
	} else {
		kfree(cmd);
	}
}
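
/*
 * Command ownership sketch (added note, not from the original source):
 * commands allocated with a completion are waited on and freed by their
 * submitter, while "fire and forget" commands (allocated with
 * allocate_completion == false, as elsewhere in this file) are freed
 * right here once their completion event has been handled, e.g.:
 *
 *	cmd = xhci_alloc_command(xhci, true, GFP_KERNEL);
 *	xhci_queue_slot_control(xhci, cmd, TRB_ENABLE_SLOT, 0);
 *	xhci_ring_cmd_db(xhci);
 *	wait_for_completion(cmd->completion);
 *	// cmd->status now holds the completion code
 *	xhci_free_command(xhci, cmd);
 */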

void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
{
	struct xhci_command *cur_cmd, *tmp_cmd;

	xhci->current_cmd = NULL;
	list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
		xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED);
}

void xhci_handle_command_timeout(struct work_struct *work)
{
	struct xhci_hcd *xhci;
	unsigned long flags;
	u64 hw_ring_state;

	xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer);

	spin_lock_irqsave(&xhci->lock, flags);

	/*
	 * If timeout work is pending, or current_cmd is NULL, it means we
	 * raced with command completion. The command is already handled, so
	 * just return.
	 */
	if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}
	/* mark this command to be cancelled */
	xhci->current_cmd->status = COMP_COMMAND_ABORTED;

	/* Make sure command ring is running before aborting it */
	hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if (hw_ring_state == ~(u64)0) {
		xhci_hc_died(xhci);
		goto time_out_completed;
	}

	if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
	    (hw_ring_state & CMD_RING_RUNNING)) {
		/* Prevent new doorbell, and start command abort */
		xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
		xhci_dbg(xhci, "Command timeout\n");
		xhci_abort_cmd_ring(xhci, flags);
		goto time_out_completed;
	}

	/* host removed. Bail out */
	if (xhci->xhc_state & XHCI_STATE_REMOVING) {
		xhci_dbg(xhci, "host removed, ring start fail?\n");
		xhci_cleanup_command_queue(xhci);

		goto time_out_completed;
	}

	/* command timeout on stopped ring, ring can't be aborted */
	xhci_dbg(xhci, "Command timeout on stopped ring\n");
	xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);

time_out_completed:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return;
}
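
/*
 * Informal summary (added note): the timeout handler above resolves in one
 * of three ways, depending on what the hardware reports:
 *   1. CRCR reads back as all ones  -> the host controller is gone, and
 *      xhci_hc_died() cleans everything up.
 *   2. The command ring is running  -> request an abort; the resulting
 *      Command Ring Stopped event restarts command processing.
 *   3. The ring is already stopped  -> turn the timed-out command into a
 *      no-op via xhci_handle_stopped_cmd_ring().
 */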

static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;
	u32 cmd_comp_code;
	union xhci_trb *cmd_trb;
	struct xhci_command *cmd;
	u32 cmd_type;

	cmd_dma = le64_to_cpu(event->cmd_trb);
	cmd_trb = xhci->cmd_ring->dequeue;

	trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic);

	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			cmd_trb);
	/*
	 * Check whether the completion event is for our internal kept
	 * command.
	 */
	if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) {
		xhci_warn(xhci,
			  "ERROR mismatched command completion event\n");
		return;
	}

	cmd = list_first_entry(&xhci->cmd_list, struct xhci_command, cmd_list);

	cancel_delayed_work(&xhci->cmd_timer);

	cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));

	/* If CMD ring stopped we own the trbs between enqueue and dequeue */
	if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) {
		complete_all(&xhci->cmd_ring_stop_completion);
		return;
	}

	if (cmd->command_trb != xhci->cmd_ring->dequeue) {
		xhci_err(xhci,
			 "Command completion event does not match command\n");
		return;
	}

	/*
	 * Host aborted the command ring, check if the current command was
	 * supposed to be aborted, otherwise continue normally.
	 * The command ring is stopped now, but the xHC will issue a Command
	 * Ring Stopped event which will cause us to restart it.
	 */
	if (cmd_comp_code == COMP_COMMAND_ABORTED) {
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
		if (cmd->status == COMP_COMMAND_ABORTED) {
			if (xhci->current_cmd == cmd)
				xhci->current_cmd = NULL;
			goto event_handled;
		}
	}

	cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
	switch (cmd_type) {
	case TRB_ENABLE_SLOT:
		xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code);
		break;
	case TRB_DISABLE_SLOT:
		xhci_handle_cmd_disable_slot(xhci, slot_id);
		break;
	case TRB_CONFIG_EP:
		if (!cmd->completion)
			xhci_handle_cmd_config_ep(xhci, slot_id, event,
						  cmd_comp_code);
		break;
	case TRB_EVAL_CONTEXT:
		break;
	case TRB_ADDR_DEV:
		xhci_handle_cmd_addr_dev(xhci, slot_id);
		break;
	case TRB_STOP_RING:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		if (!cmd->completion)
			xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
		break;
	case TRB_SET_DEQ:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_CMD_NOOP:
		/* Is this an aborted command turned to NO-OP? */
		if (cmd->status == COMP_COMMAND_RING_STOPPED)
			cmd_comp_code = COMP_COMMAND_RING_STOPPED;
		break;
	case TRB_RESET_EP:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_RESET_DEV:
		/* SLOT_ID field in reset device cmd completion event TRB is 0.
		 * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
		 */
		slot_id = TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3]));
		xhci_handle_cmd_reset_dev(xhci, slot_id, event);
		break;
	case TRB_NEC_GET_FW:
		xhci_handle_cmd_nec_get_fw(xhci, event);
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci_info(xhci, "INFO unknown command type %d\n", cmd_type);
		break;
	}

	/* restart timer if this wasn't the last command */
	if (!list_is_singular(&xhci->cmd_list)) {
		xhci->current_cmd = list_first_entry(&cmd->cmd_list,
						     struct xhci_command, cmd_list);
		xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
	} else if (xhci->current_cmd == cmd) {
		xhci->current_cmd = NULL;
	}

event_handled:
	xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);

	inc_deq(xhci, xhci->cmd_ring);
}
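
/*
 * Informal lifecycle sketch (added note, not part of the original source):
 * a command TRB is queued on xhci->cmd_list, the host is notified through
 * the command doorbell, and the xHC later posts a Command Completion Event
 * on the event ring. handle_cmd_completion() matches that event against
 * the head of cmd_list, dispatches to the per-command handler above, and
 * re-arms the timeout timer if more commands are still queued.
 */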

static void handle_vendor_event(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
		handle_cmd_completion(xhci, &event->event_cmd);
}

static void handle_device_notification(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 slot_id;
	struct usb_device *udev;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
	if (!xhci->devs[slot_id]) {
		xhci_warn(xhci, "Device Notification event for unused slot %u\n",
			  slot_id);
		return;
	}

	xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
			slot_id);
	udev = xhci->devs[slot_id]->udev;
	if (udev && udev->parent)
		usb_wakeup_notification(udev->parent, udev->portnum);
}

/*
 * Quirk handler for errata seen on the Cavium ThunderX2 processor xHCI
 * controller.
 * As per ThunderX2 errata-129, a USB 2 device may come up as USB 1
 * if a connection to a USB 1 device is followed by another connection
 * to a USB 2 device.
 *
 * Reset the PHY after the USB device is disconnected if device speed
 * is less than HCD_USB3.
 * Retry the reset sequence a maximum of 4 times, checking the PLL lock
 * status each time.
 */
static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 pll_lock_check;
	u32 retry_count = 4;

	do {
		/* Assert PHY reset */
		writel(0x6F, hcd->regs + 0x1048);
		udelay(10);
		/* De-assert the PHY reset */
		writel(0x7F, hcd->regs + 0x1048);
		udelay(200);
		pll_lock_check = readl(hcd->regs + 0x1070);
	} while (!(pll_lock_check & 0x1) && --retry_count);
}
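
/*
 * Added note: the 0x1048 and 0x1070 offsets above are vendor-specific
 * MMIO registers on the ThunderX2 xHCI implementation (a PHY reset
 * control and a PLL lock status register, per the workaround described
 * in the comment); they are not part of the standard xHCI register set.
 */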

static void handle_port_status(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	struct usb_hcd *hcd;
	u32 port_id;
	u32 portsc, cmd_reg;
	int max_ports;
	int slot_id;
	unsigned int hcd_portnum;
	struct xhci_bus_state *bus_state;
	bool bogus_port_status = false;
	struct xhci_port *port;

	/* Port status change events always have a successful completion code */
	if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
		xhci_warn(xhci,
			  "WARN: xHC returned failed port status event\n");

	port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
	max_ports = HCS_MAX_PORTS(xhci->hcs_params1);

	if ((port_id <= 0) || (port_id > max_ports)) {
		xhci_warn(xhci, "Port change event with invalid port ID %d\n",
			  port_id);
		inc_deq(xhci, xhci->event_ring);
		return;
	}

	port = &xhci->hw_ports[port_id - 1];
	if (!port || !port->rhub || port->hcd_portnum == DUPLICATE_ENTRY) {
		xhci_warn(xhci, "Port change event, no port for port ID %u\n",
			  port_id);
		bogus_port_status = true;
		goto cleanup;
	}

	/* We might get interrupts after shared_hcd is removed */
	if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) {
		xhci_dbg(xhci, "ignore port event for removed USB3 hcd\n");
		bogus_port_status = true;
		goto cleanup;
	}

	hcd = port->rhub->hcd;
	bus_state = &port->rhub->bus_state;
	hcd_portnum = port->hcd_portnum;
	portsc = readl(port->addr);

	xhci_dbg(xhci, "Port change event, %d-%d, id %d, portsc: 0x%x\n",
		 hcd->self.busnum, hcd_portnum + 1, port_id, portsc);

	trace_xhci_handle_port_status(hcd_portnum, portsc);

	if (hcd->state == HC_STATE_SUSPENDED) {
		xhci_dbg(xhci, "resume root hub\n");
		usb_hcd_resume_root_hub(hcd);
	}

	if (hcd->speed >= HCD_USB3 &&
	    (portsc & PORT_PLS_MASK) == XDEV_INACTIVE) {
		slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
		if (slot_id && xhci->devs[slot_id])
			xhci->devs[slot_id]->flags |= VDEV_PORT_ERROR;
	}

	if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
		xhci_dbg(xhci, "port resume event for port %d\n", port_id);

		cmd_reg = readl(&xhci->op_regs->command);
		if (!(cmd_reg & CMD_RUN)) {
			xhci_warn(xhci, "xHC is not running.\n");
			goto cleanup;
		}

		if (DEV_SUPERSPEED_ANY(portsc)) {
			xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
			/* Set a flag to say the port signaled remote wakeup,
			 * so we can tell the difference between the end of
			 * device and host initiated resume.
			 */
			bus_state->port_remote_wakeup |= 1 << hcd_portnum;
			xhci_test_and_clear_bit(xhci, port, PORT_PLC);
			usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
			xhci_set_link_state(xhci, port, XDEV_U0);
			/* Need to wait until the next link state change
			 * indicates the device is actually in U0.
			 */
			bogus_port_status = true;
			goto cleanup;
		} else if (!test_bit(hcd_portnum, &bus_state->resuming_ports)) {
			xhci_dbg(xhci, "resume HS port %d\n", port_id);
			bus_state->resume_done[hcd_portnum] = jiffies +
				msecs_to_jiffies(USB_RESUME_TIMEOUT);
			set_bit(hcd_portnum, &bus_state->resuming_ports);
			/* Do the rest in GetPortStatus after the resume time
			 * delay. Avoid polling roothub status before that, so
			 * that a USB device's auto-resume latency stays around
			 * ~40ms.
			 */
			set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
			mod_timer(&hcd->rh_timer,
				  bus_state->resume_done[hcd_portnum]);
			usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
			bogus_port_status = true;
		}
	}

	if ((portsc & PORT_PLC) &&
	    DEV_SUPERSPEED_ANY(portsc) &&
	    ((portsc & PORT_PLS_MASK) == XDEV_U0 ||
	     (portsc & PORT_PLS_MASK) == XDEV_U1 ||
	     (portsc & PORT_PLS_MASK) == XDEV_U2)) {
		xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
		complete(&bus_state->u3exit_done[hcd_portnum]);
		/* We've just brought the device into U0/1/2 through either the
		 * Resume state after a device remote wakeup, or through the
		 * U3Exit state after a host-initiated resume. If it's a device
		 * initiated remote wake, don't pass up the link state change,
		 * so the roothub behavior is consistent with external
		 * USB 3.0 hub behavior.
		 */
		slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
		if (slot_id && xhci->devs[slot_id])
			xhci_ring_device(xhci, slot_id);
		if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) {
			xhci_test_and_clear_bit(xhci, port, PORT_PLC);
			usb_wakeup_notification(hcd->self.root_hub,
					hcd_portnum + 1);
			bogus_port_status = true;
			goto cleanup;
		}
	}

	/*
	 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
	 * RExit to a disconnect state). If so, let the driver know it's
	 * out of the RExit state.
	 */
	if (!DEV_SUPERSPEED_ANY(portsc) && hcd->speed < HCD_USB3 &&
			test_and_clear_bit(hcd_portnum,
				&bus_state->rexit_ports)) {
		complete(&bus_state->rexit_done[hcd_portnum]);
		bogus_port_status = true;
		goto cleanup;
	}

	if (hcd->speed < HCD_USB3) {
		xhci_test_and_clear_bit(xhci, port, PORT_PLC);
		if ((xhci->quirks & XHCI_RESET_PLL_ON_DISCONNECT) &&
		    (portsc & PORT_CSC) && !(portsc & PORT_CONNECT))
			xhci_cavium_reset_phy_quirk(xhci);
	}

cleanup:
	/* Update event ring dequeue pointer before dropping the lock */
	inc_deq(xhci, xhci->event_ring);

	/* Don't make the USB core poll the roothub if we got a bad port status
	 * change event.  Besides, at that point we can't tell which roothub
	 * (USB 2.0 or USB 3.0) to kick.
	 */
	if (bogus_port_status)
		return;

	/*
	 * xHCI port-status-change events occur when the "or" of all the
	 * status-change bits in the portsc register changes from 0 to 1.
	 * New status changes won't cause an event if any other change
	 * bits are still set.  When an event occurs, switch over to
	 * polling to avoid losing status changes.
	 */
	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
	usb_hcd_poll_rh_status(hcd);
	spin_lock(&xhci->lock);
}

/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment.  If the suspect DMA address is a
 * TRB in this TD, this function returns that TRB's segment.  Otherwise it
 * returns NULL.
 */
struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
		struct xhci_segment *start_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t suspect_dma,
		bool debug)
{
	dma_addr_t start_dma;
	dma_addr_t end_seg_dma;
	dma_addr_t end_trb_dma;
	struct xhci_segment *cur_seg;

	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
	cur_seg = start_seg;

	do {
		if (start_dma == 0)
			return NULL;
		/* We may get an event for a Link TRB in the middle of a TD */
		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
		/* If the end TRB isn't in this segment, this is set to 0 */
		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);

		if (debug)
			xhci_warn(xhci,
				"Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
				(unsigned long long)suspect_dma,
				(unsigned long long)start_dma,
				(unsigned long long)end_trb_dma,
				(unsigned long long)cur_seg->dma,
				(unsigned long long)end_seg_dma);

		if (end_trb_dma > 0) {
			/* The end TRB is in this segment, so suspect should be here */
			if (start_dma <= end_trb_dma) {
				if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
					return cur_seg;
			} else {
				/* Case for one segment with
				 * a TD wrapped around to the top
				 */
				if ((suspect_dma >= start_dma &&
					suspect_dma <= end_seg_dma) ||
				    (suspect_dma >= cur_seg->dma &&
				     suspect_dma <= end_trb_dma))
					return cur_seg;
			}
			return NULL;
		} else {
			/* Might still be somewhere in this segment */
			if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
				return cur_seg;
		}
		cur_seg = cur_seg->next;
		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
	} while (cur_seg != start_seg);

	return NULL;
}
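
/*
 * Usage sketch (added note): a typical caller maps a transfer event's DMA
 * pointer back to the TD it completed, walking segments from the ring's
 * dequeue position until either the TD's last TRB is found or the starting
 * segment comes around again, e.g.:
 *
 *	ep_seg = trb_in_td(xhci, ep_ring->deq_seg, td->first_trb,
 *			   td->last_trb, le64_to_cpu(event->buffer), false);
 *	if (!ep_seg)
 *		; // the event does not point into this TD
 */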

static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td,
		struct xhci_virt_ep *ep)
{
	/*
	 * As part of low/full-speed endpoint-halt processing
	 * we must clear the TT buffer (USB 2.0 specification 11.17.5).
	 */
	if (td->urb->dev->tt && !usb_pipeint(td->urb->pipe) &&
	    (td->urb->dev->tt->hub != xhci_to_hcd(xhci)->self.root_hub) &&
	    !(ep->ep_state & EP_CLEARING_TT)) {
		ep->ep_state |= EP_CLEARING_TT;
		td->urb->ep->hcpriv = td->urb->dev;
		if (usb_hub_clear_tt_buffer(td->urb))
			ep->ep_state &= ~EP_CLEARING_TT;
	}
}

static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *td,
		enum xhci_ep_reset_type reset_type)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	struct xhci_command *command;

	/*
	 * Avoid resetting endpoint if link is inactive. Can cause host hang.
	 * Device will be reset soon to recover the link so don't do anything
	 */
	if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR)
		return;

	command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
	if (!command)
		return;

	ep->ep_state |= EP_HALTED;

	xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type);

	if (reset_type == EP_HARD_RESET) {
		ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
		xhci_cleanup_stalled_ring(xhci, slot_id, ep_index, stream_id,
					  td);
	}
	xhci_ring_cmd_db(xhci);
}

/* Check if an error has halted the endpoint ring.  The class driver will
 * clean up the halt for a non-default control endpoint if we indicate a stall.
 * However, a babble and other errors also halt the endpoint ring, and the class
 * driver won't clear the halt in that case, so we need to issue a Set Transfer
 * Ring Dequeue Pointer command manually.
 */
static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		unsigned int trb_comp_code)
{
	/* TRB completion codes that may require a manual halt cleanup */
	if (trb_comp_code == COMP_USB_TRANSACTION_ERROR ||
	    trb_comp_code == COMP_BABBLE_DETECTED_ERROR ||
	    trb_comp_code == COMP_SPLIT_TRANSACTION_ERROR)
		/* The 0.95 spec says a babbling control endpoint
		 * is not halted.  The 0.96 spec says it is.  Some HW
		 * claims to be 0.95 compliant, but it halts the control
		 * endpoint anyway.  Check if a babble halted the
		 * endpoint.
		 */
		if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED)
			return 1;

	return 0;
}

int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
{
	if (trb_comp_code >= 224 && trb_comp_code <= 255) {
		/* Vendor defined "informational" completion code,
		 * treat as not-an-error.
		 */
		xhci_dbg(xhci, "Vendor defined info completion code %u\n",
			 trb_comp_code);
		xhci_dbg(xhci, "Treating code as success.\n");
		return 1;
	}
	return 0;
}

static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
		struct xhci_ring *ep_ring, int *status)
{
	struct urb *urb = NULL;

	/* Clean up the endpoint's TD list */
	urb = td->urb;

	/* if a bounce buffer was used to align this td then unmap it */
	xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);

	/* Do one last check of the actual transfer length.
	 * If the host controller said we transferred more data than the buffer
	 * length, urb->actual_length will be a very big number (since it's
	 * unsigned).  Play it safe and say we didn't transfer anything.
	 */
	if (urb->actual_length > urb->transfer_buffer_length) {
		xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
			  urb->transfer_buffer_length, urb->actual_length);
		urb->actual_length = 0;
		*status = 0;
	}
	list_del_init(&td->td_list);
	/* Was this TD slated to be cancelled but completed anyway? */
	if (!list_empty(&td->cancelled_td_list))
		list_del_init(&td->cancelled_td_list);

	inc_td_cnt(urb);
	/* Giveback the urb when all the tds are completed */
	if (last_td_in_urb(td)) {
		if ((urb->actual_length != urb->transfer_buffer_length &&
		     (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
		    (*status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
			xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
				 urb, urb->actual_length,
				 urb->transfer_buffer_length, *status);

		/* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
			*status = 0;
		xhci_giveback_urb_in_irq(xhci, td, *status);
	}

	return 0;
}
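
/*
 * Added note: an URB that was split across multiple TDs (e.g. a multi-
 * packet isochronous URB) is only given back to the USB core once its
 * last TD completes; inc_td_cnt()/last_td_in_urb() track how many of the
 * URB's TDs have been handled so far.
 */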

static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
		struct xhci_transfer_event *event,
		struct xhci_virt_ep *ep, int *status)
{
	struct xhci_virt_device *xdev;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	u32 trb_comp_code;
	int ep_index;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
	    trb_comp_code == COMP_STOPPED ||
	    trb_comp_code == COMP_STOPPED_SHORT_PACKET) {
		/* The Endpoint Stop Command completion will take care of any
		 * stopped TDs.  A stopped TD may be restarted, so don't update
		 * the ring dequeue pointer or take this TD off any lists yet.
		 */
		return 0;
	}
	if (trb_comp_code == COMP_STALL_ERROR ||
		xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
						trb_comp_code)) {
		/*
		 * xhci internal endpoint state will go to a "halt" state for
		 * any stall, including default control pipe protocol stall.
		 * To clear the host side halt we need to issue a reset endpoint
		 * command, followed by a set dequeue command to move past the
		 * TD.
		 * Class drivers clear the device side halt from a functional
		 * stall later. Hub TT buffer should only be cleared for FS/LS
		 * devices behind HS hubs for functional stalls.
		 */
		if ((ep_index != 0) || (trb_comp_code != COMP_STALL_ERROR))
			xhci_clear_hub_tt_buffer(xhci, td, ep);
		xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
					ep_ring->stream_id, td, EP_HARD_RESET);
	} else {
		/* Update ring dequeue pointer */
		while (ep_ring->dequeue != td->last_trb)
			inc_deq(xhci, ep_ring);
		inc_deq(xhci, ep_ring);
	}

	return xhci_td_cleanup(xhci, td, ep_ring, status);
}

/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
			   union xhci_trb *stop_trb)
{
	u32 sum;
	union xhci_trb *trb = ring->dequeue;
	struct xhci_segment *seg = ring->deq_seg;

	for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) {
		if (!trb_is_noop(trb) && !trb_is_link(trb))
			sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
	}
	return sum;
}
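
/*
 * Worked example (added note, hypothetical numbers): for a TD queued as
 * three normal TRBs of 1024, 1024 and 512 bytes, a transfer event pointing
 * at the third TRB gives sum_trb_lengths() == 2048; the completed length
 * of the TD is then 2048 + (512 - remaining), as computed by the callers
 * below from the event's EVENT_TRB_LEN residue.
 */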

/*
 * Process control TDs, update URB status and actual_length.
 */
static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_virt_device *xdev;
	unsigned int slot_id;
	int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 trb_comp_code;
	u32 remaining, requested;
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3]));
	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	requested = td->urb->transfer_buffer_length;
	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));

	switch (trb_comp_code) {
	case COMP_SUCCESS:
		if (trb_type != TRB_STATUS) {
			xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n",
				  (trb_type == TRB_DATA) ? "data" : "setup");
			*status = -ESHUTDOWN;
			break;
		}
		*status = 0;
		break;
	case COMP_SHORT_PACKET:
		*status = 0;
		break;
	case COMP_STOPPED_SHORT_PACKET:
		if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
			td->urb->actual_length = remaining;
		else
			xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
		goto finish_td;
	case COMP_STOPPED:
		switch (trb_type) {
		case TRB_SETUP:
			td->urb->actual_length = 0;
			goto finish_td;
		case TRB_DATA:
		case TRB_NORMAL:
			td->urb->actual_length = requested - remaining;
			goto finish_td;
		case TRB_STATUS:
			td->urb->actual_length = requested;
			goto finish_td;
		default:
			xhci_warn(xhci, "WARN: unexpected TRB Type %d\n",
				  trb_type);
			goto finish_td;
		}
	case COMP_STOPPED_LENGTH_INVALID:
		goto finish_td;
	default:
		if (!xhci_requires_manual_halt_cleanup(xhci,
						       ep_ctx, trb_comp_code))
			break;
		xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
			 trb_comp_code, ep_index);
		/* else fall through */
	case COMP_STALL_ERROR:
		/* Did we transfer part of the data (middle) phase? */
		if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
			td->urb->actual_length = requested - remaining;
		else if (!td->urb_length_set)
			td->urb->actual_length = 0;
		goto finish_td;
	}

	/* stopped at setup stage, no data transferred */
	if (trb_type == TRB_SETUP)
		goto finish_td;

	/*
	 * if on the data stage then update the actual_length of the URB and
	 * flag it as set, so it won't be overwritten in the event for the
	 * last TRB.
	 */
	if (trb_type == TRB_DATA ||
	    trb_type == TRB_NORMAL) {
		td->urb_length_set = true;
		td->urb->actual_length = requested - remaining;
		xhci_dbg(xhci, "Waiting for status stage event\n");
		return 0;
	}

	/* at status stage */
	if (!td->urb_length_set)
		td->urb->actual_length = requested;

finish_td:
	return finish_td(xhci, td, event, ep, status);
}
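
/*
 * Added note: a control transfer is queued as up to three stages (setup,
 * optional data, status), so the handler above may see up to three
 * transfer events for one URB. urb->actual_length is only meaningful for
 * the data stage; td->urb_length_set guards it so a later status-stage
 * event does not overwrite the value recorded when the data stage
 * completed.
 */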
2139
2140/*
Andiry Xu04e51902010-07-22 15:23:39 -07002141 * Process isochronous tds, update urb packet status and actual_length.
2142 */
2143static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
Mathias Nymanf97c08a2016-11-11 15:13:18 +02002144 union xhci_trb *ep_trb, struct xhci_transfer_event *event,
Andiry Xu04e51902010-07-22 15:23:39 -07002145 struct xhci_virt_ep *ep, int *status)
2146{
2147 struct xhci_ring *ep_ring;
2148 struct urb_priv *urb_priv;
2149 int idx;
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002150 struct usb_iso_packet_descriptor *frame;
Andiry Xu04e51902010-07-22 15:23:39 -07002151 u32 trb_comp_code;
Mathias Nyman36da3a12016-11-11 15:13:19 +02002152 bool sum_trbs_for_length = false;
2153 u32 remaining, requested, ep_trb_len;
2154 int short_framestatus;
Andiry Xu04e51902010-07-22 15:23:39 -07002155
Matt Evans28ccd292011-03-29 13:40:46 +11002156 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2157 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
Andiry Xu04e51902010-07-22 15:23:39 -07002158 urb_priv = td->urb->hcpriv;
Mathias Nyman9ef7fbb2017-01-23 14:20:25 +02002159 idx = urb_priv->num_tds_done;
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002160 frame = &td->urb->iso_frame_desc[idx];
Mathias Nyman36da3a12016-11-11 15:13:19 +02002161 requested = frame->length;
2162 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2163 ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
2164 short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
2165 -EREMOTEIO : 0;
Andiry Xu04e51902010-07-22 15:23:39 -07002166
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002167 /* handle completion code */
2168 switch (trb_comp_code) {
2169 case COMP_SUCCESS:
Mathias Nyman36da3a12016-11-11 15:13:19 +02002170 if (remaining) {
2171 frame->status = short_framestatus;
2172 if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
2173 sum_trbs_for_length = true;
Sarah Sharp1530bbc62012-05-08 09:22:49 -07002174 break;
2175 }
Mathias Nyman36da3a12016-11-11 15:13:19 +02002176 frame->status = 0;
2177 break;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002178 case COMP_SHORT_PACKET:
Mathias Nyman36da3a12016-11-11 15:13:19 +02002179 frame->status = short_framestatus;
2180 sum_trbs_for_length = true;
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002181 break;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002182 case COMP_BANDWIDTH_OVERRUN_ERROR:
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002183 frame->status = -ECOMM;
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002184 break;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002185 case COMP_ISOCH_BUFFER_OVERRUN:
2186 case COMP_BABBLE_DETECTED_ERROR:
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002187 frame->status = -EOVERFLOW;
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002188 break;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002189 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2190 case COMP_STALL_ERROR:
Mathias Nymand104d012015-04-30 17:16:02 +03002191 frame->status = -EPROTO;
Mathias Nymand104d012015-04-30 17:16:02 +03002192 break;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002193 case COMP_USB_TRANSACTION_ERROR:
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002194 frame->status = -EPROTO;
Mathias Nymanf97c08a2016-11-11 15:13:18 +02002195 if (ep_trb != td->last_trb)
Mathias Nymand104d012015-04-30 17:16:02 +03002196 return 0;
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002197 break;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002198 case COMP_STOPPED:
Mathias Nyman36da3a12016-11-11 15:13:19 +02002199 sum_trbs_for_length = true;
2200 break;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002201 case COMP_STOPPED_SHORT_PACKET:
Mathias Nyman36da3a12016-11-11 15:13:19 +02002202		/* field normally containing residue now contains transferred length */
2203 frame->status = short_framestatus;
2204 requested = remaining;
2205 break;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002206 case COMP_STOPPED_LENGTH_INVALID:
Mathias Nyman36da3a12016-11-11 15:13:19 +02002207 requested = 0;
2208 remaining = 0;
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002209 break;
2210 default:
Mathias Nyman36da3a12016-11-11 15:13:19 +02002211 sum_trbs_for_length = true;
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002212 frame->status = -1;
2213 break;
Andiry Xu04e51902010-07-22 15:23:39 -07002214 }
2215
Mathias Nyman36da3a12016-11-11 15:13:19 +02002216 if (sum_trbs_for_length)
2217 frame->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb) +
2218 ep_trb_len - remaining;
2219 else
2220 frame->actual_length = requested;
Andiry Xu04e51902010-07-22 15:23:39 -07002221
Mathias Nyman36da3a12016-11-11 15:13:19 +02002222 td->urb->actual_length += frame->actual_length;
Andiry Xu04e51902010-07-22 15:23:39 -07002223
Lu Baolu0c341912018-03-16 16:33:00 +02002224 return finish_td(xhci, td, event, ep, status);
Andiry Xu04e51902010-07-22 15:23:39 -07002225}
2226
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002227static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
2228 struct xhci_transfer_event *event,
2229 struct xhci_virt_ep *ep, int *status)
2230{
2231 struct xhci_ring *ep_ring;
2232 struct urb_priv *urb_priv;
2233 struct usb_iso_packet_descriptor *frame;
2234 int idx;
2235
Matt Evansf6975312011-06-01 13:01:01 +10002236 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002237 urb_priv = td->urb->hcpriv;
Mathias Nyman9ef7fbb2017-01-23 14:20:25 +02002238 idx = urb_priv->num_tds_done;
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002239 frame = &td->urb->iso_frame_desc[idx];
2240
Sarah Sharpb3df3f92011-06-15 19:57:46 -07002241 /* The transfer is partly done. */
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002242 frame->status = -EXDEV;
2243
2244 /* calc actual length */
2245 frame->actual_length = 0;
2246
2247 /* Update ring dequeue pointer */
2248 while (ep_ring->dequeue != td->last_trb)
Andiry Xu3b72fca2012-03-05 17:49:32 +08002249 inc_deq(xhci, ep_ring);
2250 inc_deq(xhci, ep_ring);
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002251
Mathias Nyman3134bc92017-06-15 11:55:48 +03002252 return xhci_td_cleanup(xhci, td, ep_ring, status);
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002253}
2254
Andiry Xu04e51902010-07-22 15:23:39 -07002255/*
Andiry Xu22405ed2010-07-22 15:23:08 -07002256 * Process bulk and interrupt tds, update urb status and actual_length.
2257 */
2258static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
Mathias Nymanf97c08a2016-11-11 15:13:18 +02002259 union xhci_trb *ep_trb, struct xhci_transfer_event *event,
Andiry Xu22405ed2010-07-22 15:23:08 -07002260 struct xhci_virt_ep *ep, int *status)
2261{
Mathias Nymanf8f80be2018-09-20 19:13:37 +03002262 struct xhci_slot_ctx *slot_ctx;
Andiry Xu22405ed2010-07-22 15:23:08 -07002263 struct xhci_ring *ep_ring;
Andiry Xu22405ed2010-07-22 15:23:08 -07002264 u32 trb_comp_code;
Mathias Nymanf97c08a2016-11-11 15:13:18 +02002265 u32 remaining, requested, ep_trb_len;
Mathias Nymanf8f80be2018-09-20 19:13:37 +03002266 unsigned int slot_id;
2267 int ep_index;
Andiry Xu22405ed2010-07-22 15:23:08 -07002268
Mathias Nymanf8f80be2018-09-20 19:13:37 +03002269 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
2270 slot_ctx = xhci_get_slot_ctx(xhci, xhci->devs[slot_id]->out_ctx);
2271 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
Matt Evans28ccd292011-03-29 13:40:46 +11002272 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2273 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
Mathias Nyman30a65b42016-11-11 15:13:17 +02002274 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
Mathias Nymanf97c08a2016-11-11 15:13:18 +02002275 ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
Mathias Nyman30a65b42016-11-11 15:13:17 +02002276 requested = td->urb->transfer_buffer_length;
Andiry Xu22405ed2010-07-22 15:23:08 -07002277
2278 switch (trb_comp_code) {
2279 case COMP_SUCCESS:
Mathias Nymanf8f80be2018-09-20 19:13:37 +03002280 ep_ring->err_count = 0;
Mathias Nyman30a65b42016-11-11 15:13:17 +02002281 /* handle success with untransferred data as short packet */
Mathias Nymanf97c08a2016-11-11 15:13:18 +02002282 if (ep_trb != td->last_trb || remaining) {
Mathias Nyman52ab8682016-11-11 15:13:15 +02002283 xhci_warn(xhci, "WARN Successful completion on short TX\n");
Mathias Nyman30a65b42016-11-11 15:13:17 +02002284 xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
2285 td->urb->ep->desc.bEndpointAddress,
2286 requested, remaining);
Andiry Xu22405ed2010-07-22 15:23:08 -07002287 }
Mathias Nyman52ab8682016-11-11 15:13:15 +02002288 *status = 0;
Andiry Xu22405ed2010-07-22 15:23:08 -07002289 break;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002290 case COMP_SHORT_PACKET:
Mathias Nyman30a65b42016-11-11 15:13:17 +02002291 xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
2292 td->urb->ep->desc.bEndpointAddress,
2293 requested, remaining);
2294 *status = 0;
2295 break;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002296 case COMP_STOPPED_SHORT_PACKET:
Mathias Nyman30a65b42016-11-11 15:13:17 +02002297 td->urb->actual_length = remaining;
2298 goto finish_td;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002299 case COMP_STOPPED_LENGTH_INVALID:
Mathias Nyman30a65b42016-11-11 15:13:17 +02002300 /* stopped on ep trb with invalid length, exclude it */
Mathias Nymanf97c08a2016-11-11 15:13:18 +02002301 ep_trb_len = 0;
Mathias Nyman30a65b42016-11-11 15:13:17 +02002302 remaining = 0;
Andiry Xu22405ed2010-07-22 15:23:08 -07002303 break;
Mathias Nymanf8f80be2018-09-20 19:13:37 +03002304 case COMP_USB_TRANSACTION_ERROR:
2305 if ((ep_ring->err_count++ > MAX_SOFT_RETRY) ||
2306 le32_to_cpu(slot_ctx->tt_info) & TT_SLOT)
2307 break;
2308 *status = 0;
2309 xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
2310 ep_ring->stream_id, td, EP_SOFT_RESET);
2311 return 0;
Andiry Xu22405ed2010-07-22 15:23:08 -07002312 default:
Mathias Nyman30a65b42016-11-11 15:13:17 +02002313 /* do nothing */
Andiry Xu22405ed2010-07-22 15:23:08 -07002314 break;
2315 }
Mathias Nyman30a65b42016-11-11 15:13:17 +02002316
Mathias Nymanf97c08a2016-11-11 15:13:18 +02002317 if (ep_trb == td->last_trb)
Mathias Nyman30a65b42016-11-11 15:13:17 +02002318 td->urb->actual_length = requested - remaining;
2319 else
Lu Baolu40a3b772015-08-06 19:24:01 +03002320 td->urb->actual_length =
Mathias Nymanf97c08a2016-11-11 15:13:18 +02002321 sum_trb_lengths(xhci, ep_ring, ep_trb) +
2322 ep_trb_len - remaining;
Mathias Nyman30a65b42016-11-11 15:13:17 +02002323finish_td:
2324 if (remaining > requested) {
2325 xhci_warn(xhci, "bad transfer trb length %d in event trb\n",
2326 remaining);
Andiry Xu22405ed2010-07-22 15:23:08 -07002327 td->urb->actual_length = 0;
Andiry Xu22405ed2010-07-22 15:23:08 -07002328 }
Lu Baolu0c341912018-03-16 16:33:00 +02002329 return finish_td(xhci, td, event, ep, status);
Andiry Xu22405ed2010-07-22 15:23:08 -07002330}
2331
2332/*
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002333 * If this function returns an error condition, it means it got a Transfer
2334 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
2335 * At this point, the host controller is probably hosed and should be reset.
2336 */
2337static int handle_tx_event(struct xhci_hcd *xhci,
2338 struct xhci_transfer_event *event)
2339{
2340 struct xhci_virt_device *xdev;
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07002341 struct xhci_virt_ep *ep;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002342 struct xhci_ring *ep_ring;
Sarah Sharp82d10092009-08-07 14:04:52 -07002343 unsigned int slot_id;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002344 int ep_index;
Randy Dunlap326b4812010-04-19 08:53:50 -07002345 struct xhci_td *td = NULL;
Mathias Nymanf97c08a2016-11-11 15:13:18 +02002346 dma_addr_t ep_trb_dma;
2347 struct xhci_segment *ep_seg;
2348 union xhci_trb *ep_trb;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002349 int status = -EINPROGRESS;
John Yound115b042009-07-27 12:05:15 -07002350 struct xhci_ep_ctx *ep_ctx;
Andiry Xuc2d7b492011-09-19 16:05:12 -07002351 struct list_head *tmp;
Sarah Sharp66d1eeb2009-08-27 14:35:53 -07002352 u32 trb_comp_code;
Andiry Xuc2d7b492011-09-19 16:05:12 -07002353 int td_num = 0;
Mathias Nyman3b4739b82015-10-12 11:30:12 +03002354 bool handling_skipped_tds = false;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002355
Matt Evans28ccd292011-03-29 13:40:46 +11002356 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
Mathias Nymanb3368382017-06-15 11:55:43 +03002357 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
2358 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2359 ep_trb_dma = le64_to_cpu(event->buffer);
2360
Sarah Sharp82d10092009-08-07 14:04:52 -07002361 xdev = xhci->devs[slot_id];
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002362 if (!xdev) {
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002363 xhci_err(xhci, "ERROR Transfer event pointed to bad slot %u\n",
2364 slot_id);
Mathias Nymanb3368382017-06-15 11:55:43 +03002365 goto err_out;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002366 }
2367
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07002368 ep = &xdev->eps[ep_index];
Mathias Nymanb3368382017-06-15 11:55:43 +03002369 ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma);
John Yound115b042009-07-27 12:05:15 -07002370 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
Mathias Nymanb3368382017-06-15 11:55:43 +03002371
Mathias Nymanade2e3a2017-06-15 11:55:46 +03002372 if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) {
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002373 xhci_err(xhci,
Mathias Nymanade2e3a2017-06-15 11:55:46 +03002374 "ERROR Transfer event for disabled endpoint slot %u ep %u\n",
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002375 slot_id, ep_index);
Mathias Nymanb3368382017-06-15 11:55:43 +03002376 goto err_out;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002377 }
2378
Mathias Nymanade2e3a2017-06-15 11:55:46 +03002379 /* Some transfer events don't always point to a trb, see xhci 4.17.4 */
2380 if (!ep_ring) {
2381 switch (trb_comp_code) {
2382 case COMP_STALL_ERROR:
2383 case COMP_USB_TRANSACTION_ERROR:
2384 case COMP_INVALID_STREAM_TYPE_ERROR:
2385 case COMP_INVALID_STREAM_ID_ERROR:
2386 xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, 0,
Lu Baolu5fee5a52018-03-16 16:32:59 +02002387 NULL, EP_SOFT_RESET);
Mathias Nymanade2e3a2017-06-15 11:55:46 +03002388 goto cleanup;
2389 case COMP_RING_UNDERRUN:
2390 case COMP_RING_OVERRUN:
Sandeep Singhd9193ef2018-11-09 17:21:19 +02002391 case COMP_STOPPED_LENGTH_INVALID:
Mathias Nymanade2e3a2017-06-15 11:55:46 +03002392 goto cleanup;
2393 default:
2394 xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n",
2395 slot_id, ep_index);
2396 goto err_out;
2397 }
2398 }
2399
Andiry Xuc2d7b492011-09-19 16:05:12 -07002400	/* Count the TDs currently on the ring if ep->skip is set */
2401 if (ep->skip) {
2402 list_for_each(tmp, &ep_ring->td_list)
2403 td_num++;
2404 }
2405
Andiry Xu986a92d2010-07-22 15:23:20 -07002406 /* Look for common error cases */
Sarah Sharp66d1eeb2009-08-27 14:35:53 -07002407 switch (trb_comp_code) {
Sarah Sharpb10de142009-04-27 19:58:50 -07002408 /* Skip codes that require special handling depending on
2409 * transfer type
2410 */
2411 case COMP_SUCCESS:
Vivek Gautam1c11a172013-03-21 12:06:48 +05302412 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
Sarah Sharp1530bbc62012-05-08 09:22:49 -07002413 break;
Mathias Nyman7ff11162019-12-11 16:20:06 +02002414 if (xhci->quirks & XHCI_TRUST_TX_LENGTH ||
2415 ep_ring->last_td_was_short)
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002416 trb_comp_code = COMP_SHORT_PACKET;
Sarah Sharp1530bbc62012-05-08 09:22:49 -07002417 else
Sarah Sharp8202ce22012-07-25 10:52:45 -07002418 xhci_warn_ratelimited(xhci,
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002419 "WARN Successful completion on short TX for slot %u ep %u: needs XHCI_TRUST_TX_LENGTH quirk?\n",
2420 slot_id, ep_index);
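		/* intentional fall-through to the short packet case */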
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002421 case COMP_SHORT_PACKET:
Sarah Sharpb10de142009-04-27 19:58:50 -07002422 break;
Mathias Nymanb3368382017-06-15 11:55:43 +03002423 /* Completion codes for endpoint stopped state */
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002424 case COMP_STOPPED:
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002425 xhci_dbg(xhci, "Stopped on Transfer TRB for slot %u ep %u\n",
2426 slot_id, ep_index);
Sarah Sharpae636742009-04-29 19:02:31 -07002427 break;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002428 case COMP_STOPPED_LENGTH_INVALID:
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002429 xhci_dbg(xhci,
2430 "Stopped on No-op or Link TRB for slot %u ep %u\n",
2431 slot_id, ep_index);
Sarah Sharpae636742009-04-29 19:02:31 -07002432 break;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002433 case COMP_STOPPED_SHORT_PACKET:
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002434 xhci_dbg(xhci,
2435 "Stopped with short packet transfer detected for slot %u ep %u\n",
2436 slot_id, ep_index);
Lu Baolu40a3b772015-08-06 19:24:01 +03002437 break;
Mathias Nymanb3368382017-06-15 11:55:43 +03002438 /* Completion codes for endpoint halted state */
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002439 case COMP_STALL_ERROR:
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002440 xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n", slot_id,
2441 ep_index);
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07002442 ep->ep_state |= EP_HALTED;
Sarah Sharpb10de142009-04-27 19:58:50 -07002443 status = -EPIPE;
2444 break;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002445 case COMP_SPLIT_TRANSACTION_ERROR:
Mathias Nyman76eac5d2020-03-12 16:45:10 +02002446 xhci_dbg(xhci, "Split transaction error for slot %u ep %u\n",
2447 slot_id, ep_index);
2448 status = -EPROTO;
2449 break;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002450 case COMP_USB_TRANSACTION_ERROR:
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002451 xhci_dbg(xhci, "Transfer error for slot %u ep %u on endpoint\n",
2452 slot_id, ep_index);
Sarah Sharpb10de142009-04-27 19:58:50 -07002453 status = -EPROTO;
2454 break;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002455 case COMP_BABBLE_DETECTED_ERROR:
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002456 xhci_dbg(xhci, "Babble error for slot %u ep %u on endpoint\n",
2457 slot_id, ep_index);
Sarah Sharp4a731432009-07-27 12:04:32 -07002458 status = -EOVERFLOW;
2459 break;
Mathias Nymanb3368382017-06-15 11:55:43 +03002460 /* Completion codes for endpoint error state */
2461 case COMP_TRB_ERROR:
2462 xhci_warn(xhci,
2463 "WARN: TRB error for slot %u ep %u on endpoint\n",
2464 slot_id, ep_index);
2465 status = -EILSEQ;
2466 break;
2467 /* completion codes not indicating endpoint state change */
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002468 case COMP_DATA_BUFFER_ERROR:
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002469 xhci_warn(xhci,
2470 "WARN: HC couldn't access mem fast enough for slot %u ep %u\n",
2471 slot_id, ep_index);
Sarah Sharpb10de142009-04-27 19:58:50 -07002472 status = -ENOSR;
2473 break;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002474 case COMP_BANDWIDTH_OVERRUN_ERROR:
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002475 xhci_warn(xhci,
2476 "WARN: bandwidth overrun event for slot %u ep %u on endpoint\n",
2477 slot_id, ep_index);
Andiry Xu986a92d2010-07-22 15:23:20 -07002478 break;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002479 case COMP_ISOCH_BUFFER_OVERRUN:
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002480 xhci_warn(xhci,
2481 "WARN: buffer overrun event for slot %u ep %u on endpoint",
2482 slot_id, ep_index);
Andiry Xu986a92d2010-07-22 15:23:20 -07002483 break;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002484 case COMP_RING_UNDERRUN:
Andiry Xu986a92d2010-07-22 15:23:20 -07002485 /*
2486 * When the Isoch ring is empty, the xHC will generate
2487		 * When the Isoch ring is empty, the xHC will generate
2488		 * a Ring Overrun Event for an IN Isoch endpoint or a
2489		 * Ring Underrun Event for an OUT Isoch endpoint.
2490 xhci_dbg(xhci, "underrun event on endpoint\n");
2491 if (!list_empty(&ep_ring->td_list))
2492 xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
2493 "still with TDs queued?\n",
Matt Evans28ccd292011-03-29 13:40:46 +11002494 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2495 ep_index);
Andiry Xu986a92d2010-07-22 15:23:20 -07002496 goto cleanup;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002497 case COMP_RING_OVERRUN:
Andiry Xu986a92d2010-07-22 15:23:20 -07002498 xhci_dbg(xhci, "overrun event on endpoint\n");
2499 if (!list_empty(&ep_ring->td_list))
2500 xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
2501 "still with TDs queued?\n",
Matt Evans28ccd292011-03-29 13:40:46 +11002502 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2503 ep_index);
Andiry Xu986a92d2010-07-22 15:23:20 -07002504 goto cleanup;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002505 case COMP_MISSED_SERVICE_ERROR:
Andiry Xud18240d2010-07-22 15:23:25 -07002506 /*
2507		 * When a missed service error is encountered, one or more
2508		 * isoc TDs may have been missed by the xHC.
2509		 * Set the skip flag of the ep_ring; complete the missed TDs
2510		 * as short transfers when processing the ep_ring next time.
2511 */
2512 ep->skip = true;
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002513 xhci_dbg(xhci,
2514 "Miss service interval error for slot %u ep %u, set skip flag\n",
2515 slot_id, ep_index);
Andiry Xud18240d2010-07-22 15:23:25 -07002516 goto cleanup;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002517 case COMP_NO_PING_RESPONSE_ERROR:
Mathias Nyman3b4739b82015-10-12 11:30:12 +03002518 ep->skip = true;
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002519 xhci_dbg(xhci,
2520 "No Ping response error for slot %u ep %u, Skip one Isoc TD\n",
2521 slot_id, ep_index);
Mathias Nyman3b4739b82015-10-12 11:30:12 +03002522 goto cleanup;
Mathias Nymanb3368382017-06-15 11:55:43 +03002523
2524 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2525 /* needs disable slot command to recover */
2526 xhci_warn(xhci,
2527 "WARN: detect an incompatible device for slot %u ep %u",
2528 slot_id, ep_index);
2529 status = -EPROTO;
2530 break;
Sarah Sharpb10de142009-04-27 19:58:50 -07002531 default:
Sarah Sharpb45b5062009-12-09 15:59:06 -08002532 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
Sarah Sharp5ad6a522009-11-11 10:28:40 -08002533 status = 0;
2534 break;
2535 }
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002536 xhci_warn(xhci,
2537 "ERROR Unknown event condition %u for slot %u ep %u , HC probably busted\n",
2538 trb_comp_code, slot_id, ep_index);
Sarah Sharpb10de142009-04-27 19:58:50 -07002539 goto cleanup;
2540 }
Andiry Xu986a92d2010-07-22 15:23:20 -07002541
Andiry Xud18240d2010-07-22 15:23:25 -07002542 do {
2543 /* This TRB should be in the TD at the head of this ring's
2544 * TD list.
2545 */
2546 if (list_empty(&ep_ring->td_list)) {
Sarah Sharpa83d6752013-03-18 10:19:51 -07002547 /*
Mathias Nymane4ec40e2017-12-01 13:41:19 +02002548			 * Don't print warnings if it's due to a stopped endpoint
2549			 * generating an extra completion event when the device
2550			 * was suspended, or an event for the last TRB of a
2551			 * short TD we already got a short event for.
2552			 * The short TD is already removed from the TD list.
Sarah Sharpa83d6752013-03-18 10:19:51 -07002553 */
Mathias Nymane4ec40e2017-12-01 13:41:19 +02002554
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002555 if (!(trb_comp_code == COMP_STOPPED ||
Mathias Nymane4ec40e2017-12-01 13:41:19 +02002556 trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
2557 ep_ring->last_td_was_short)) {
Sarah Sharpa83d6752013-03-18 10:19:51 -07002558 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
2559 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2560 ep_index);
Sarah Sharpa83d6752013-03-18 10:19:51 -07002561 }
Andiry Xud18240d2010-07-22 15:23:25 -07002562 if (ep->skip) {
2563 ep->skip = false;
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002564 xhci_dbg(xhci, "td_list is empty while skip flag set. Clear skip flag for slot %u ep %u.\n",
2565 slot_id, ep_index);
Andiry Xud18240d2010-07-22 15:23:25 -07002566 }
Mathias Nyman93ceaa82020-04-21 17:08:20 +03002567 if (trb_comp_code == COMP_STALL_ERROR ||
2568 xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
2569 trb_comp_code)) {
2570 xhci_cleanup_halted_endpoint(xhci, slot_id,
2571 ep_index,
2572 ep_ring->stream_id,
2573 NULL,
2574 EP_HARD_RESET);
2575 }
Andiry Xud18240d2010-07-22 15:23:25 -07002576 goto cleanup;
2577 }
Andiry Xu986a92d2010-07-22 15:23:20 -07002578
Andiry Xuc2d7b492011-09-19 16:05:12 -07002579		/* We've skipped all the TDs on the ep ring when ep->skip is set */
2580 if (ep->skip && td_num == 0) {
2581 ep->skip = false;
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002582 xhci_dbg(xhci, "All tds on the ep_ring skipped. Clear skip flag for slot %u ep %u.\n",
2583 slot_id, ep_index);
Andiry Xuc2d7b492011-09-19 16:05:12 -07002584 goto cleanup;
2585 }
2586
Felipe Balbi04861f82017-01-23 14:20:09 +02002587 td = list_first_entry(&ep_ring->td_list, struct xhci_td,
2588 td_list);
Andiry Xuc2d7b492011-09-19 16:05:12 -07002589 if (ep->skip)
2590 td_num--;
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002591
Andiry Xud18240d2010-07-22 15:23:25 -07002592 /* Is this a TRB in the currently executing TD? */
Mathias Nymanf97c08a2016-11-11 15:13:18 +02002593 ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
2594 td->last_trb, ep_trb_dma, false);
Alex Hee1cf4862011-06-03 15:58:25 +08002595
2596 /*
2597 * Skip the Force Stopped Event. The event_trb(event_dma) of FSE
2598		 * is not in the current TD pointed to by ep_ring->dequeue
2599		 * because the hardware dequeue pointer is still at the
2600		 * previous TRB of the current TD. The previous TRB may be a
2601		 * Link TRB or the last TRB of the previous TD. The command
2602		 * completion handler will take care of the rest.
2603 */
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002604 if (!ep_seg && (trb_comp_code == COMP_STOPPED ||
2605 trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
Alex Hee1cf4862011-06-03 15:58:25 +08002606 goto cleanup;
2607 }
2608
Mathias Nymanf97c08a2016-11-11 15:13:18 +02002609 if (!ep_seg) {
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002610 if (!ep->skip ||
2611 !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
Sarah Sharpad808332011-05-25 10:43:56 -07002612 /* Some host controllers give a spurious
2613 * successful event after a short transfer.
2614 * Ignore it.
2615 */
Mathias Nymanddba5cd2014-05-08 19:26:00 +03002616 if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
Sarah Sharpad808332011-05-25 10:43:56 -07002617 ep_ring->last_td_was_short) {
2618 ep_ring->last_td_was_short = false;
Sarah Sharpad808332011-05-25 10:43:56 -07002619 goto cleanup;
2620 }
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002621 /* HC is busted, give up! */
2622 xhci_err(xhci,
2623 "ERROR Transfer event TRB DMA ptr not "
Hans de Goedecffb9be2014-08-20 16:41:51 +03002624 "part of current TD ep_index %d "
2625 "comp_code %u\n", ep_index,
2626 trb_comp_code);
2627 trb_in_td(xhci, ep_ring->deq_seg,
2628 ep_ring->dequeue, td->last_trb,
Mathias Nymanf97c08a2016-11-11 15:13:18 +02002629 ep_trb_dma, true);
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002630 return -ESHUTDOWN;
2631 }
2632
Mathias Nyman0c03d892016-11-11 15:13:23 +02002633 skip_isoc_td(xhci, td, event, ep, &status);
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002634 goto cleanup;
2635 }
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002636 if (trb_comp_code == COMP_SHORT_PACKET)
Sarah Sharpad808332011-05-25 10:43:56 -07002637 ep_ring->last_td_was_short = true;
2638 else
2639 ep_ring->last_td_was_short = false;
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002640
2641 if (ep->skip) {
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002642 xhci_dbg(xhci,
2643 "Found td. Clear skip flag for slot %u ep %u.\n",
2644 slot_id, ep_index);
Andiry Xud18240d2010-07-22 15:23:25 -07002645 ep->skip = false;
2646 }
Andiry Xu986a92d2010-07-22 15:23:20 -07002647
Mathias Nymanf97c08a2016-11-11 15:13:18 +02002648 ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) /
2649 sizeof(*ep_trb)];
Felipe Balbia37c3f72017-01-23 14:20:19 +02002650
2651 trace_xhci_handle_transfer(ep_ring,
2652 (struct xhci_generic_trb *) ep_trb);
2653
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002654 /*
Lu Baolu810a6242017-10-06 17:45:29 +03002655 * No-op TRB could trigger interrupts in a case where
2656 * a URB was killed and a STALL_ERROR happens right
2657 * after the endpoint ring stopped. Reset the halted
2658 * endpoint. Otherwise, the endpoint remains stalled
2659 * indefinitely.
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002660 */
Mathias Nymanf97c08a2016-11-11 15:13:18 +02002661 if (trb_is_noop(ep_trb)) {
Lu Baolu810a6242017-10-06 17:45:29 +03002662 if (trb_comp_code == COMP_STALL_ERROR ||
2663 xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
2664 trb_comp_code))
2665 xhci_cleanup_halted_endpoint(xhci, slot_id,
2666 ep_index,
2667 ep_ring->stream_id,
Lu Baolu5fee5a52018-03-16 16:32:59 +02002668 td, EP_HARD_RESET);
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002669 goto cleanup;
Andiry Xud18240d2010-07-22 15:23:25 -07002670 }
2671
Mathias Nyman0c03d892016-11-11 15:13:23 +02002672 /* update the urb's actual_length and give back to the core */
Andiry Xud18240d2010-07-22 15:23:25 -07002673 if (usb_endpoint_xfer_control(&td->urb->ep->desc))
Mathias Nyman0c03d892016-11-11 15:13:23 +02002674 process_ctrl_td(xhci, td, ep_trb, event, ep, &status);
Andiry Xu04e51902010-07-22 15:23:39 -07002675 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
Mathias Nyman0c03d892016-11-11 15:13:23 +02002676 process_isoc_td(xhci, td, ep_trb, event, ep, &status);
Andiry Xud18240d2010-07-22 15:23:25 -07002677 else
Mathias Nyman0c03d892016-11-11 15:13:23 +02002678 process_bulk_intr_td(xhci, td, ep_trb, event, ep,
2679 &status);
Andiry Xu4422da62010-07-22 15:22:55 -07002680cleanup:
Mathias Nyman3b4739b82015-10-12 11:30:12 +03002681 handling_skipped_tds = ep->skip &&
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002682 trb_comp_code != COMP_MISSED_SERVICE_ERROR &&
2683 trb_comp_code != COMP_NO_PING_RESPONSE_ERROR;
Mathias Nyman3b4739b82015-10-12 11:30:12 +03002684
Andiry Xud18240d2010-07-22 15:23:25 -07002685 /*
Mathias Nyman3b4739b82015-10-12 11:30:12 +03002686 * Do not update event ring dequeue pointer if we're in a loop
2687 * processing missed tds.
Sarah Sharp82d10092009-08-07 14:04:52 -07002688 */
Mathias Nyman3b4739b82015-10-12 11:30:12 +03002689 if (!handling_skipped_tds)
Andiry Xu3b72fca2012-03-05 17:49:32 +08002690 inc_deq(xhci, xhci->event_ring);
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002691
Andiry Xud18240d2010-07-22 15:23:25 -07002692 /*
2693		 * If ep->skip is set, it means there are missed TDs on the
2694		 * endpoint ring that need to be taken care of.
2695		 * Process them as short transfers until we reach the TD
2696		 * pointed to by the event.
2697 */
Mathias Nyman3b4739b82015-10-12 11:30:12 +03002698 } while (handling_skipped_tds);
Andiry Xud18240d2010-07-22 15:23:25 -07002699
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002700 return 0;
Mathias Nymanb3368382017-06-15 11:55:43 +03002701
2702err_out:
2703 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
2704 (unsigned long long) xhci_trb_virt_to_dma(
2705 xhci->event_ring->deq_seg,
2706 xhci->event_ring->dequeue),
2707 lower_32_bits(le64_to_cpu(event->buffer)),
2708 upper_32_bits(le64_to_cpu(event->buffer)),
2709 le32_to_cpu(event->transfer_len),
2710 le32_to_cpu(event->flags));
2711 return -ENODEV;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002712}
2713
2714/*
Sarah Sharp0f2a7932009-04-27 19:57:12 -07002715 * This function handles all OS-owned events on the event ring. It may drop
2716 * xhci->lock between event processing (e.g. to pass up port status changes).
Matt Evans9dee9a22011-03-29 13:41:02 +11002717 * Returns >0 for "possibly more events to process" (caller should call again),
2718 * otherwise 0 if done. In future, <0 returns should indicate error code.
Sarah Sharp0f2a7932009-04-27 19:57:12 -07002719 */
Matt Evans9dee9a22011-03-29 13:41:02 +11002720static int xhci_handle_event(struct xhci_hcd *xhci)
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002721{
2722 union xhci_trb *event;
Sarah Sharp0f2a7932009-04-27 19:57:12 -07002723 int update_ptrs = 1;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002724 int ret;
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002725
Lu Baoluf4c8f032016-11-11 15:13:25 +02002726 /* Event ring hasn't been allocated yet. */
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002727 if (!xhci->event_ring || !xhci->event_ring->dequeue) {
Lu Baoluf4c8f032016-11-11 15:13:25 +02002728 xhci_err(xhci, "ERROR event ring not ready\n");
2729 return -ENOMEM;
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002730 }
2731
2732 event = xhci->event_ring->dequeue;
2733 /* Does the HC or OS own the TRB? */
Matt Evans28ccd292011-03-29 13:40:46 +11002734 if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
Lu Baoluf4c8f032016-11-11 15:13:25 +02002735 xhci->event_ring->cycle_state)
Matt Evans9dee9a22011-03-29 13:41:02 +11002736 return 0;
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002737
Felipe Balbia37c3f72017-01-23 14:20:19 +02002738 trace_xhci_handle_event(xhci->event_ring, &event->generic);
2739
Matt Evans92a3da42011-03-29 13:40:51 +11002740 /*
2741 * Barrier between reading the TRB_CYCLE (valid) flag above and any
2742 * speculative reads of the event's flags/data below.
2743 */
2744 rmb();
Sarah Sharp0f2a7932009-04-27 19:57:12 -07002745 /* FIXME: Handle more event types. */
Lu Baoluf4c8f032016-11-11 15:13:25 +02002746 switch (le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) {
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002747 case TRB_TYPE(TRB_COMPLETION):
2748 handle_cmd_completion(xhci, &event->event_cmd);
2749 break;
Sarah Sharp0f2a7932009-04-27 19:57:12 -07002750 case TRB_TYPE(TRB_PORT_STATUS):
2751 handle_port_status(xhci, event);
2752 update_ptrs = 0;
2753 break;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002754 case TRB_TYPE(TRB_TRANSFER):
2755 ret = handle_tx_event(xhci, &event->trans_event);
Lu Baoluf4c8f032016-11-11 15:13:25 +02002756 if (ret >= 0)
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002757 update_ptrs = 0;
2758 break;
Sarah Sharp623bef92011-11-11 14:57:33 -08002759 case TRB_TYPE(TRB_DEV_NOTE):
2760 handle_device_notification(xhci, event);
2761 break;
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002762 default:
Matt Evans28ccd292011-03-29 13:40:46 +11002763 if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
2764 TRB_TYPE(48))
Sarah Sharp02386342010-05-24 13:25:28 -07002765 handle_vendor_event(xhci, event);
2766 else
Lu Baoluf4c8f032016-11-11 15:13:25 +02002767 xhci_warn(xhci, "ERROR unknown event type %d\n",
2768 TRB_FIELD_TO_TYPE(
2769 le32_to_cpu(event->event_cmd.flags)));
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002770 }
Sarah Sharp6f5165c2009-10-27 10:57:01 -07002771 /* Any of the above functions may drop and re-acquire the lock, so check
2772 * to make sure a watchdog timer didn't mark the host as non-responsive.
2773 */
2774 if (xhci->xhc_state & XHCI_STATE_DYING) {
2775 xhci_dbg(xhci, "xHCI host dying, returning from "
2776 "event handler.\n");
Matt Evans9dee9a22011-03-29 13:41:02 +11002777 return 0;
Sarah Sharp6f5165c2009-10-27 10:57:01 -07002778 }
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002779
Sarah Sharpc06d68b2010-07-29 22:12:49 -07002780 if (update_ptrs)
2781 /* Update SW event ring dequeue pointer */
Andiry Xu3b72fca2012-03-05 17:49:32 +08002782 inc_deq(xhci, xhci->event_ring);
Sarah Sharpc06d68b2010-07-29 22:12:49 -07002783
Matt Evans9dee9a22011-03-29 13:41:02 +11002784 /* Are there more items on the event ring? Caller will call us again to
2785 * check.
2786 */
2787 return 1;
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002788}
Sarah Sharp9032cd52010-07-29 22:12:29 -07002789
2790/*
Peter Chendc0ffbe2019-11-15 18:50:00 +02002791 * Update Event Ring Dequeue Pointer:
2792 * - When all events have finished
2793 * - To avoid "Event Ring Full Error" condition
2794 */
2795static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
2796 union xhci_trb *event_ring_deq)
2797{
2798 u64 temp_64;
2799 dma_addr_t deq;
2800
2801 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2802 /* If necessary, update the HW's version of the event ring deq ptr. */
2803 if (event_ring_deq != xhci->event_ring->dequeue) {
2804 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2805 xhci->event_ring->dequeue);
2806 if (deq == 0)
2807 xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
2808 /*
2809 * Per 4.9.4, Software writes to the ERDP register shall
2810 * always advance the Event Ring Dequeue Pointer value.
2811 */
2812 if ((temp_64 & (u64) ~ERST_PTR_MASK) ==
2813 ((u64) deq & (u64) ~ERST_PTR_MASK))
2814 return;
2815
2816 /* Update HC event ring dequeue pointer */
2817 temp_64 &= ERST_PTR_MASK;
2818 temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
2819 }
2820
2821 /* Clear the event handler busy flag (RW1C) */
2822 temp_64 |= ERST_EHB;
2823 xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
2824}
2825
2826/*
Sarah Sharp9032cd52010-07-29 22:12:29 -07002827 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
2828 * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
2829 * indicators of an event TRB error, but we check the status *first* to be safe.
2830 */
2831irqreturn_t xhci_irq(struct usb_hcd *hcd)
2832{
2833 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
Sarah Sharpc06d68b2010-07-29 22:12:49 -07002834 union xhci_trb *event_ring_deq;
Felipe Balbi76a35292017-01-23 14:20:07 +02002835 irqreturn_t ret = IRQ_NONE;
Alan Stern63aea0d2017-05-17 18:32:03 +03002836 unsigned long flags;
Felipe Balbi76a35292017-01-23 14:20:07 +02002837 u64 temp_64;
2838 u32 status;
Peter Chendc0ffbe2019-11-15 18:50:00 +02002839 int event_loop = 0;
Sarah Sharp9032cd52010-07-29 22:12:29 -07002840
Alan Stern63aea0d2017-05-17 18:32:03 +03002841 spin_lock_irqsave(&xhci->lock, flags);
Sarah Sharp9032cd52010-07-29 22:12:29 -07002842 /* Check if the xHC generated the interrupt, or the irq is shared */
Xenia Ragiadakoub0ba9722013-11-15 05:34:06 +02002843 status = readl(&xhci->op_regs->status);
Mathias Nymand9f11ba2017-04-07 17:57:01 +03002844 if (status == ~(u32)0) {
2845 xhci_hc_died(xhci);
Felipe Balbi76a35292017-01-23 14:20:07 +02002846 ret = IRQ_HANDLED;
2847 goto out;
Sarah Sharp9032cd52010-07-29 22:12:29 -07002848 }
Felipe Balbi76a35292017-01-23 14:20:07 +02002849
2850 if (!(status & STS_EINT))
2851 goto out;
2852
Sarah Sharp27e0dd42010-07-29 22:12:43 -07002853 if (status & STS_FATAL) {
Sarah Sharp9032cd52010-07-29 22:12:29 -07002854 xhci_warn(xhci, "WARNING: Host System Error\n");
2855 xhci_halt(xhci);
Felipe Balbi76a35292017-01-23 14:20:07 +02002856 ret = IRQ_HANDLED;
2857 goto out;
Sarah Sharp9032cd52010-07-29 22:12:29 -07002858 }
2859
Sarah Sharpbda53142010-07-29 22:12:38 -07002860 /*
2861 * Clear the op reg interrupt status first,
2862 * so we can receive interrupts from other MSI-X interrupters.
2863 * Write 1 to clear the interrupt status.
2864 */
Sarah Sharp27e0dd42010-07-29 22:12:43 -07002865 status |= STS_EINT;
Xenia Ragiadakou204b7792013-11-15 05:34:07 +02002866 writel(status, &xhci->op_regs->status);
Sarah Sharpbda53142010-07-29 22:12:38 -07002867
Peter Chen6a29bee2017-05-17 18:32:02 +03002868 if (!hcd->msi_enabled) {
Sarah Sharpc21599a2010-07-29 22:13:00 -07002869 u32 irq_pending;
Xenia Ragiadakoub0ba9722013-11-15 05:34:06 +02002870 irq_pending = readl(&xhci->ir_set->irq_pending);
Felipe Balbi4e833c02012-03-15 16:37:08 +02002871 irq_pending |= IMAN_IP;
Xenia Ragiadakou204b7792013-11-15 05:34:07 +02002872 writel(irq_pending, &xhci->ir_set->irq_pending);
Sarah Sharpc21599a2010-07-29 22:13:00 -07002873 }
Sarah Sharpbda53142010-07-29 22:12:38 -07002874
Gabriel Krisman Bertazi27a41a82016-06-01 18:09:07 +03002875 if (xhci->xhc_state & XHCI_STATE_DYING ||
2876 xhci->xhc_state & XHCI_STATE_HALTED) {
Sarah Sharpbda53142010-07-29 22:12:38 -07002877 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
2878 "Shouldn't IRQs be disabled?\n");
Sarah Sharpc06d68b2010-07-29 22:12:49 -07002879 /* Clear the event handler busy flag (RW1C);
2880 * the event ring should be empty.
Sarah Sharpbda53142010-07-29 22:12:38 -07002881 */
Sarah Sharpf7b2e402014-01-30 13:27:49 -08002882 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
Sarah Sharp477632d2014-01-29 14:02:00 -08002883 xhci_write_64(xhci, temp_64 | ERST_EHB,
2884 &xhci->ir_set->erst_dequeue);
Felipe Balbi76a35292017-01-23 14:20:07 +02002885 ret = IRQ_HANDLED;
2886 goto out;
Sarah Sharpc06d68b2010-07-29 22:12:49 -07002887 }
2888
2889 event_ring_deq = xhci->event_ring->dequeue;
2890 /* FIXME this should be a delayed service routine
2891 * that clears the EHB.
2892 */
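	/*
	 * Updating the dequeue pointer every TRBS_PER_SEGMENT / 2 events
	 * keeps a long burst of events from filling the event ring and
	 * raising an Event Ring Full Error.
	 */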
Peter Chendc0ffbe2019-11-15 18:50:00 +02002893 while (xhci_handle_event(xhci) > 0) {
2894 if (event_loop++ < TRBS_PER_SEGMENT / 2)
2895 continue;
2896 xhci_update_erst_dequeue(xhci, event_ring_deq);
2897 event_loop = 0;
Sarah Sharpc06d68b2010-07-29 22:12:49 -07002898 }
Sarah Sharpbda53142010-07-29 22:12:38 -07002899
Peter Chendc0ffbe2019-11-15 18:50:00 +02002900 xhci_update_erst_dequeue(xhci, event_ring_deq);
Felipe Balbi76a35292017-01-23 14:20:07 +02002901 ret = IRQ_HANDLED;
Sarah Sharpc06d68b2010-07-29 22:12:49 -07002902
Felipe Balbi76a35292017-01-23 14:20:07 +02002903out:
Alan Stern63aea0d2017-05-17 18:32:03 +03002904 spin_unlock_irqrestore(&xhci->lock, flags);
Sarah Sharp9032cd52010-07-29 22:12:29 -07002905
Felipe Balbi76a35292017-01-23 14:20:07 +02002906 return ret;
Sarah Sharp9032cd52010-07-29 22:12:29 -07002907}
2908
Alex Shi851ec162013-05-24 10:54:19 +08002909irqreturn_t xhci_msi_irq(int irq, void *hcd)
Sarah Sharp9032cd52010-07-29 22:12:29 -07002910{
Alan Stern968b8222011-11-03 12:03:38 -04002911 return xhci_irq(hcd);
Sarah Sharp9032cd52010-07-29 22:12:29 -07002912}
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002913
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002914/**** Endpoint Ring Operations ****/
2915
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002916/*
2917 * Generic function for queueing a TRB on a ring.
2918 * The caller must have checked to make sure there's room on the ring.
Sarah Sharp6cc30d82010-06-10 12:25:28 -07002919 *
2920 * @more_trbs_coming: Will you enqueue more TRBs before calling
2921 * prepare_transfer()?
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002922 */
2923static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
Andiry Xu3b72fca2012-03-05 17:49:32 +08002924 bool more_trbs_coming,
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002925 u32 field1, u32 field2, u32 field3, u32 field4)
2926{
2927 struct xhci_generic_trb *trb;
2928
2929 trb = &ring->enqueue->generic;
Matt Evans28ccd292011-03-29 13:40:46 +11002930 trb->field[0] = cpu_to_le32(field1);
2931 trb->field[1] = cpu_to_le32(field2);
2932 trb->field[2] = cpu_to_le32(field3);
2933 trb->field[3] = cpu_to_le32(field4);
Felipe Balbia37c3f72017-01-23 14:20:19 +02002934
2935 trace_xhci_queue_trb(ring, trb);
2936
Andiry Xu3b72fca2012-03-05 17:49:32 +08002937 inc_enq(xhci, ring, more_trbs_coming);
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002938}
2939
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002940/*
2941 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
2942 * FIXME allocate segments if the ring is full.
2943 */
2944static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
Andiry Xu3b72fca2012-03-05 17:49:32 +08002945 u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002946{
Andiry Xu8dfec612012-03-05 17:49:37 +08002947 unsigned int num_trbs_needed;
2948
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002949 /* Make sure the endpoint has been added to xHC schedule */
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002950 switch (ep_state) {
2951 case EP_STATE_DISABLED:
2952 /*
2953 * USB core changed config/interfaces without notifying us,
2954 * or hardware is reporting the wrong state.
2955 */
2956 xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
2957 return -ENOENT;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002958 case EP_STATE_ERROR:
Sarah Sharpc92bcfa2009-07-27 12:05:21 -07002959 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002960 /* FIXME event handling code for error needs to clear it */
2961 /* XXX not sure if this should be -ENOENT or not */
2962 return -EINVAL;
Sarah Sharpc92bcfa2009-07-27 12:05:21 -07002963 case EP_STATE_HALTED:
2964 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
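		/* fall through: queue the URB on a halted endpoint anyway */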
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002965 case EP_STATE_STOPPED:
2966 case EP_STATE_RUNNING:
2967 break;
2968 default:
2969 xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
2970 /*
2971 * FIXME issue Configure Endpoint command to try to get the HC
2972 * back into a known state.
2973 */
2974 return -EINVAL;
2975 }
Andiry Xu8dfec612012-03-05 17:49:37 +08002976
2977 while (1) {
Sarah Sharp3d4b81e2014-01-31 11:52:57 -08002978 if (room_on_ring(xhci, ep_ring, num_trbs))
2979 break;
Andiry Xu8dfec612012-03-05 17:49:37 +08002980
2981 if (ep_ring == xhci->cmd_ring) {
2982 xhci_err(xhci, "Do not support expand command ring\n");
2983 return -ENOMEM;
2984 }
2985
Xenia Ragiadakou68ffb012013-08-14 06:33:56 +03002986 xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
2987 "ERROR no room on ep ring, try ring expansion");
Andiry Xu8dfec612012-03-05 17:49:37 +08002988 num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
2989 if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
2990 mem_flags)) {
2991 xhci_err(xhci, "Ring expansion failed\n");
2992 return -ENOMEM;
2993 }
Peter Senna Tschudin261fa122012-09-12 19:03:17 +02002994 }
John Youn6c12db92010-05-10 15:33:00 -07002995
Mathias Nymand0c77d82016-06-21 10:58:07 +03002996 while (trb_is_link(ep_ring->enqueue)) {
2997 /* If we're not dealing with 0.95 hardware or isoc rings
2998 * on AMD 0.96 host, clear the chain bit.
2999 */
3000 if (!xhci_link_trb_quirk(xhci) &&
3001 !(ep_ring->type == TYPE_ISOC &&
3002 (xhci->quirks & XHCI_AMD_0x96_HOST)))
3003 ep_ring->enqueue->link.control &=
3004 cpu_to_le32(~TRB_CHAIN);
3005 else
3006 ep_ring->enqueue->link.control |=
3007 cpu_to_le32(TRB_CHAIN);
John Youn6c12db92010-05-10 15:33:00 -07003008
Mathias Nymand0c77d82016-06-21 10:58:07 +03003009 wmb();
3010 ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
John Youn6c12db92010-05-10 15:33:00 -07003011
Mathias Nymand0c77d82016-06-21 10:58:07 +03003012 /* Toggle the cycle bit after the last ring segment. */
3013 if (link_trb_toggles_cycle(ep_ring->enqueue))
3014 ep_ring->cycle_state ^= 1;
John Youn6c12db92010-05-10 15:33:00 -07003015
Mathias Nymand0c77d82016-06-21 10:58:07 +03003016 ep_ring->enq_seg = ep_ring->enq_seg->next;
3017 ep_ring->enqueue = ep_ring->enq_seg->trbs;
John Youn6c12db92010-05-10 15:33:00 -07003018 }
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003019 return 0;
3020}
3021
Sarah Sharp23e3be12009-04-29 19:05:20 -07003022static int prepare_transfer(struct xhci_hcd *xhci,
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003023 struct xhci_virt_device *xdev,
3024 unsigned int ep_index,
Sarah Sharpe9df17e2010-04-02 15:34:43 -07003025 unsigned int stream_id,
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003026 unsigned int num_trbs,
3027 struct urb *urb,
Andiry Xu8e51adc2010-07-22 15:23:31 -07003028 unsigned int td_index,
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003029 gfp_t mem_flags)
3030{
3031 int ret;
Andiry Xu8e51adc2010-07-22 15:23:31 -07003032 struct urb_priv *urb_priv;
3033 struct xhci_td *td;
Sarah Sharpe9df17e2010-04-02 15:34:43 -07003034 struct xhci_ring *ep_ring;
John Yound115b042009-07-27 12:05:15 -07003035 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
Sarah Sharpe9df17e2010-04-02 15:34:43 -07003036
3037 ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
3038 if (!ep_ring) {
3039 xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
3040 stream_id);
3041 return -EINVAL;
3042 }
3043
Mathias Nyman5071e6b2016-11-11 15:13:28 +02003044 ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
Andiry Xu3b72fca2012-03-05 17:49:32 +08003045 num_trbs, mem_flags);
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003046 if (ret)
3047 return ret;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003048
Andiry Xu8e51adc2010-07-22 15:23:31 -07003049 urb_priv = urb->hcpriv;
Mathias Nyman7e64b032017-01-23 14:20:26 +02003050 td = &urb_priv->td[td_index];
Andiry Xu8e51adc2010-07-22 15:23:31 -07003051
3052 INIT_LIST_HEAD(&td->td_list);
3053 INIT_LIST_HEAD(&td->cancelled_td_list);
3054
3055 if (td_index == 0) {
Sarah Sharp214f76f2010-10-26 11:22:02 -07003056 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
Sarah Sharpd13565c2011-07-22 14:34:34 -07003057 if (unlikely(ret))
Andiry Xu8e51adc2010-07-22 15:23:31 -07003058 return ret;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003059 }
3060
Andiry Xu8e51adc2010-07-22 15:23:31 -07003061 td->urb = urb;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003062 /* Add this TD to the tail of the endpoint ring's TD list */
Andiry Xu8e51adc2010-07-22 15:23:31 -07003063 list_add_tail(&td->td_list, &ep_ring->td_list);
3064 td->start_seg = ep_ring->enq_seg;
3065 td->first_trb = ep_ring->enqueue;
3066
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003067 return 0;
3068}
3069
Lu Baolu67d2ea92017-12-08 17:59:09 +02003070unsigned int count_trbs(u64 addr, u64 len)
Sarah Sharp8a96c052009-04-27 19:59:19 -07003071{
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003072 unsigned int num_trbs;
Sarah Sharp8a96c052009-04-27 19:59:19 -07003073
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003074 num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
3075 TRB_MAX_BUFF_SIZE);
3076 if (num_trbs == 0)
3077 num_trbs++;
Sarah Sharp8a96c052009-04-27 19:59:19 -07003078
Sarah Sharp8a96c052009-04-27 19:59:19 -07003079 return num_trbs;
3080}
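
/*
 * Worked example (illustrative, added for clarity): with TRB_MAX_BUFF_SIZE
 * of 64 KiB, a 1024-byte transfer starting 0xFF00 bytes into a 64 KiB
 * region straddles one boundary, so DIV_ROUND_UP(1024 + 0xFF00, 0x10000)
 * yields two TRBs. A zero-length transfer still consumes one TRB.
 */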
3081
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003082static inline unsigned int count_trbs_needed(struct urb *urb)
Sarah Sharp8a96c052009-04-27 19:59:19 -07003083{
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003084 return count_trbs(urb->transfer_dma, urb->transfer_buffer_length);
3085}
3086
3087static unsigned int count_sg_trbs_needed(struct urb *urb)
3088{
3089 struct scatterlist *sg;
3090 unsigned int i, len, full_len, num_trbs = 0;
3091
3092 full_len = urb->transfer_buffer_length;
3093
3094 for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
3095 len = sg_dma_len(sg);
3096 num_trbs += count_trbs(sg_dma_address(sg), len);
3097 len = min_t(unsigned int, len, full_len);
3098 full_len -= len;
3099 if (full_len == 0)
3100 break;
3101 }
3102
3103 return num_trbs;
3104}
3105
3106static unsigned int count_isoc_trbs_needed(struct urb *urb, int i)
3107{
3108 u64 addr, len;
3109
3110 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
3111 len = urb->iso_frame_desc[i].length;
3112
3113 return count_trbs(addr, len);
3114}
3115
3116static void check_trb_math(struct urb *urb, int running_total)
3117{
3118 if (unlikely(running_total != urb->transfer_buffer_length))
Paul Zimmermana2490182011-02-12 14:06:44 -08003119 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
Sarah Sharp8a96c052009-04-27 19:59:19 -07003120 "queued %#x (%d), asked for %#x (%d)\n",
3121 __func__,
3122 urb->ep->desc.bEndpointAddress,
3123 running_total, running_total,
3124 urb->transfer_buffer_length,
3125 urb->transfer_buffer_length);
3126}
3127
Sarah Sharp23e3be12009-04-29 19:05:20 -07003128static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
Sarah Sharpe9df17e2010-04-02 15:34:43 -07003129 unsigned int ep_index, unsigned int stream_id, int start_cycle,
Andiry Xue1eab2e2011-01-04 16:30:39 -08003130 struct xhci_generic_trb *start_trb)
Sarah Sharp8a96c052009-04-27 19:59:19 -07003131{
Sarah Sharp8a96c052009-04-27 19:59:19 -07003132 /*
3133 * Pass all the TRBs to the hardware at once and make sure this write
3134 * isn't reordered.
3135 */
3136 wmb();
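	/*
	 * The first TRB of the TD was queued with its cycle bit inverted,
	 * so the HC has ignored the TD so far; writing the correct cycle
	 * bit below hands the fully written TD to the hardware in one step.
	 */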
Andiry Xu50f7b522010-12-20 15:09:34 +08003137 if (start_cycle)
Matt Evans28ccd292011-03-29 13:40:46 +11003138 start_trb->field[3] |= cpu_to_le32(start_cycle);
Andiry Xu50f7b522010-12-20 15:09:34 +08003139 else
Matt Evans28ccd292011-03-29 13:40:46 +11003140 start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
Andiry Xube88fe42010-10-14 07:22:57 -07003141 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
Sarah Sharp8a96c052009-04-27 19:59:19 -07003142}
3143
Alexandr Ivanov78140152016-04-22 13:17:11 +03003144static void check_interval(struct xhci_hcd *xhci, struct urb *urb,
3145 struct xhci_ep_ctx *ep_ctx)
Sarah Sharp624defa2009-09-02 12:14:28 -07003146{
Sarah Sharp624defa2009-09-02 12:14:28 -07003147 int xhci_interval;
3148 int ep_interval;
3149
Matt Evans28ccd292011-03-29 13:40:46 +11003150 xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
Sarah Sharp624defa2009-09-02 12:14:28 -07003151 ep_interval = urb->interval;
Alexandr Ivanov78140152016-04-22 13:17:11 +03003152
Sarah Sharp624defa2009-09-02 12:14:28 -07003153 /* Convert to microframes */
3154 if (urb->dev->speed == USB_SPEED_LOW ||
3155 urb->dev->speed == USB_SPEED_FULL)
3156 ep_interval *= 8;
Alexandr Ivanov78140152016-04-22 13:17:11 +03003157
Sarah Sharp624defa2009-09-02 12:14:28 -07003158 /* FIXME change this to a warning and a suggestion to use the new API
3159 * to set the polling interval (once the API is added).
3160 */
3161 if (xhci_interval != ep_interval) {
Dmitry Kasatkin0730d522013-08-27 17:47:35 +03003162 dev_dbg_ratelimited(&urb->dev->dev,
3163 "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
3164 ep_interval, ep_interval == 1 ? "" : "s",
3165 xhci_interval, xhci_interval == 1 ? "" : "s");
Sarah Sharp624defa2009-09-02 12:14:28 -07003166 urb->interval = xhci_interval;
3167 /* Convert back to frames for LS/FS devices */
3168 if (urb->dev->speed == USB_SPEED_LOW ||
3169 urb->dev->speed == USB_SPEED_FULL)
3170 urb->interval /= 8;
3171 }
Alexandr Ivanov78140152016-04-22 13:17:11 +03003172}
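
/*
 * Example (illustrative): a full-speed interrupt URB submitted with
 * urb->interval = 4 frames is compared as 32 microframes; if the endpoint
 * context instead encodes 16 microframes, urb->interval is corrected to 16
 * and then converted back to 2 frames for the LS/FS device.
 */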
3173
3174/*
3175 * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt
3176 * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD
3177 * (comprised of sg list entries) can take several service intervals to
3178 * transmit.
3179 */
3180int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3181 struct urb *urb, int slot_id, unsigned int ep_index)
3182{
3183 struct xhci_ep_ctx *ep_ctx;
3184
3185 ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index);
3186 check_interval(xhci, urb, ep_ctx);
3187
Dan Carpenter3fc82062012-03-28 10:30:26 +03003188 return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
Sarah Sharp624defa2009-09-02 12:14:28 -07003189}
3190
Sarah Sharp04dd9502009-11-11 10:28:30 -08003191/*
Sarah Sharp4525c0a2012-10-25 15:56:40 -07003192 * For xHCI 1.0 host controllers, TD size is the number of max packet sized
3193 * packets remaining in the TD (*not* including this TRB).
Sarah Sharp4da6e6f2011-04-01 14:01:30 -07003194 *
3195 * Total TD packet count = total_packet_count =
Sarah Sharp4525c0a2012-10-25 15:56:40 -07003196 * DIV_ROUND_UP(TD size in bytes / wMaxPacketSize)
Sarah Sharp4da6e6f2011-04-01 14:01:30 -07003197 *
3198 * Packets transferred up to and including this TRB = packets_transferred =
3199 * rounddown(total bytes transferred including this TRB / wMaxPacketSize)
3200 *
3201 * TD size = total_packet_count - packets_transferred
3202 *
Mathias Nymanc840d6c2015-10-09 13:30:08 +03003203 * For xHCI 0.96 and older, TD size field should be the remaining bytes
3204 * including this TRB, right shifted by 10
3205 *
3206 * For all hosts it must fit in bits 21:17, so it can't be bigger than 31.
3207 * This is taken care of in the TRB_TD_SIZE() macro
3208 *
Sarah Sharp4525c0a2012-10-25 15:56:40 -07003209 * The last TRB in a TD must have the TD size set to zero.
Sarah Sharp4da6e6f2011-04-01 14:01:30 -07003210 */
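/*
 * Worked example for the rules above (illustrative): a 3072-byte TD queued
 * as three 1024-byte TRBs on a 1.0 host with wMaxPacketSize = 512 has
 * total_packet_count = DIV_ROUND_UP(3072, 512) = 6. The first TRB reports
 * a TD size of 6 - (1024 / 512) = 4, the second 6 - (2048 / 512) = 2, and
 * the last TRB reports 0. A 0.96 host would instead report the remaining
 * bytes right shifted by 10.
 */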
Mathias Nymanc840d6c2015-10-09 13:30:08 +03003211static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
3212 int trb_buff_len, unsigned int td_total_len,
Mathias Nyman124c3932016-06-21 10:57:59 +03003213 struct urb *urb, bool more_trbs_coming)
Sarah Sharp4da6e6f2011-04-01 14:01:30 -07003214{
Mathias Nymanc840d6c2015-10-09 13:30:08 +03003215 u32 maxp, total_packet_count;
3216
Chunfeng Yun72b663a2017-12-08 18:10:06 +02003217 /* MTK xHCI 0.96 contains some features from 1.0 */
Chunfeng Yun0cbd4b32015-11-24 13:09:55 +02003218 if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
Mathias Nymanc840d6c2015-10-09 13:30:08 +03003219 return ((td_total_len - transferred) >> 10);
3220
Sarah Sharp48df4a62011-08-12 10:23:01 -07003221 /* One TRB with a zero-length data packet. */
Mathias Nyman124c3932016-06-21 10:57:59 +03003222 if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
Mathias Nymanc840d6c2015-10-09 13:30:08 +03003223 trb_buff_len == td_total_len)
Sarah Sharp48df4a62011-08-12 10:23:01 -07003224 return 0;
3225
Chunfeng Yun72b663a2017-12-08 18:10:06 +02003226	/* for MTK xHCI 0.96, TD size includes this TRB, but not in 1.x */
3227 if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100))
Chunfeng Yun0cbd4b32015-11-24 13:09:55 +02003228 trb_buff_len = 0;
3229
Felipe Balbi734d3dd2016-09-28 13:46:37 +03003230 maxp = usb_endpoint_maxp(&urb->ep->desc);
Chunfeng Yun0cbd4b32015-11-24 13:09:55 +02003231 total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
3232
Mathias Nymanc840d6c2015-10-09 13:30:08 +03003233 /* Queueing functions don't count the current TRB into transferred */
3234 return (total_packet_count - ((transferred + trb_buff_len) / maxp));
Sarah Sharp4da6e6f2011-04-01 14:01:30 -07003235}
3236
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003237
Mathias Nyman474ed232016-06-21 10:58:01 +03003238static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003239 u32 *trb_buff_len, struct xhci_segment *seg)
Mathias Nyman474ed232016-06-21 10:58:01 +03003240{
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003241 struct device *dev = xhci_to_hcd(xhci)->self.controller;
Mathias Nyman474ed232016-06-21 10:58:01 +03003242 unsigned int unalign;
3243 unsigned int max_pkt;
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003244 u32 new_buff_len;
Henry Lin597c56e2019-05-22 14:33:57 +03003245 size_t len;
Mathias Nyman474ed232016-06-21 10:58:01 +03003246
Felipe Balbi734d3dd2016-09-28 13:46:37 +03003247 max_pkt = usb_endpoint_maxp(&urb->ep->desc);
Mathias Nyman474ed232016-06-21 10:58:01 +03003248 unalign = (enqd_len + *trb_buff_len) % max_pkt;
3249
3250 /* we got lucky, last normal TRB data on segment is packet aligned */
3251 if (unalign == 0)
3252 return 0;
3253
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003254 xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n",
3255 unalign, *trb_buff_len);
3256
Mathias Nyman474ed232016-06-21 10:58:01 +03003257	/* is the last normal TRB alignable by splitting it? */
3258 if (*trb_buff_len > unalign) {
3259 *trb_buff_len -= unalign;
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003260 xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len);
Mathias Nyman474ed232016-06-21 10:58:01 +03003261 return 0;
3262 }
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003263
3264 /*
3265	 * We want enqd_len + trb_buff_len to sum up to a number which is
3266	 * divisible by the endpoint's wMaxPacketSize. IOW:
3267 * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
3268 */
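	/*
	 * Example (illustrative): with max_pkt = 1024 and enqd_len = 5000,
	 * new_buff_len = 1024 - (5000 % 1024) = 120, so the enqueued data
	 * plus the bounce buffer totals 5120 bytes, exactly five packets.
	 */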
3269 new_buff_len = max_pkt - (enqd_len % max_pkt);
3270
3271 if (new_buff_len > (urb->transfer_buffer_length - enqd_len))
3272 new_buff_len = (urb->transfer_buffer_length - enqd_len);
3273
3274	/* create a bounce buffer, at most max_pkt in size, pointed to by the last TRB */
3275 if (usb_urb_dir_out(urb)) {
Henry Lin597c56e2019-05-22 14:33:57 +03003276 len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003277 seg->bounce_buf, new_buff_len, enqd_len);
Mathias Nymanc03101ff2019-10-04 14:59:26 +03003278 if (len != new_buff_len)
Henry Lin597c56e2019-05-22 14:33:57 +03003279 xhci_warn(xhci,
Fabio Estevamc1a145a2019-05-22 10:35:29 -03003280 "WARN Wrong bounce buffer write length: %zu != %d\n",
Mathias Nymanc03101ff2019-10-04 14:59:26 +03003281 len, new_buff_len);
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003282 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
3283 max_pkt, DMA_TO_DEVICE);
3284 } else {
3285 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
3286 max_pkt, DMA_FROM_DEVICE);
3287 }
3288
3289 if (dma_mapping_error(dev, seg->bounce_dma)) {
3290 /* try without aligning. Some host controllers survive */
3291 xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n");
3292 return 0;
3293 }
3294 *trb_buff_len = new_buff_len;
3295 seg->bounce_len = new_buff_len;
3296 seg->bounce_offs = enqd_len;
3297
3298 xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len);
3299
Mathias Nyman474ed232016-06-21 10:58:01 +03003300 return 1;
3301}
3302
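/*
 * Worked example for the align logic above (illustrative numbers): with
 * max_pkt = 512, enqd_len = 1000 and a 300 byte last TRB on the segment,
 * unalign = 1300 % 512 = 276; since 300 > 276 the TRB is shortened to
 * 24 bytes and the segment ends packet aligned.  If the last TRB could
 * only hold 20 bytes, it cannot be split, so for a longer transfer a
 * bounce buffer of new_buff_len = 512 - (1000 % 512) = 24 bytes carries
 * the TD across the segment boundary at a packet boundary
 * (enqd_len + 24 = 1024).
 */
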
/* This is very similar to what ehci-q.c qtd_fill() does */
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	struct xhci_generic_trb *start_trb;
	struct scatterlist *sg = NULL;
	bool more_trbs_coming = true;
	bool need_zero_pkt = false;
	bool first_trb = true;
	unsigned int num_trbs;
	unsigned int start_cycle, num_sgs = 0;
	unsigned int enqd_len, block_len, trb_buff_len, full_len;
	int sent_len, ret;
	u32 field, length_field, remainder;
	u64 addr, send_addr;

	ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ring)
		return -EINVAL;

	full_len = urb->transfer_buffer_length;
	/* If we have a scatter/gather list, we use it. */
	if (urb->num_sgs) {
		num_sgs = urb->num_mapped_sgs;
		sg = urb->sg;
		addr = (u64) sg_dma_address(sg);
		block_len = sg_dma_len(sg);
		num_trbs = count_sg_trbs_needed(urb);
	} else {
		num_trbs = count_trbs_needed(urb);
		addr = (u64) urb->transfer_dma;
		block_len = full_len;
	}
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (unlikely(ret < 0))
		return ret;

	urb_priv = urb->hcpriv;

	/* Deal with URB_ZERO_PACKET - need one more td/trb */
	if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->num_tds > 1)
		need_zero_pkt = true;

	td = &urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs. The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ring->enqueue->generic;
	start_cycle = ring->cycle_state;
	send_addr = addr;

	/* Queue the TRBs, even if they are zero-length */
	for (enqd_len = 0; first_trb || enqd_len < full_len;
			enqd_len += trb_buff_len) {
		field = TRB_TYPE(TRB_NORMAL);

		/* TRB buffer should not cross 64KB boundaries */
		trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
		trb_buff_len = min_t(unsigned int, trb_buff_len, block_len);

		if (enqd_len + trb_buff_len > full_len)
			trb_buff_len = full_len - enqd_len;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb) {
			first_trb = false;
			if (start_cycle == 0)
				field |= TRB_CYCLE;
		} else
			field |= ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (enqd_len + trb_buff_len < full_len) {
			field |= TRB_CHAIN;
			if (trb_is_link(ring->enqueue + 1)) {
				if (xhci_align_td(xhci, urb, enqd_len,
						  &trb_buff_len,
						  ring->enq_seg)) {
					send_addr = ring->enq_seg->bounce_dma;
					/* assuming TD won't span 2 segs */
					td->bounce_seg = ring->enq_seg;
				}
			}
		}
		if (enqd_len + trb_buff_len >= full_len) {
			field &= ~TRB_CHAIN;
			field |= TRB_IOC;
			more_trbs_coming = false;
			td->last_trb = ring->enqueue;

			if (xhci_urb_suitable_for_idt(urb)) {
				memcpy(&send_addr, urb->transfer_buffer,
				       trb_buff_len);
				le64_to_cpus(&send_addr);
				field |= TRB_IDT;
			}
		}

		/* Only set interrupt on short packet for IN endpoints */
		if (usb_urb_dir_in(urb))
			field |= TRB_ISP;

		/* Set the TRB length, TD size, and interrupter fields. */
		remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
					      full_len, urb, more_trbs_coming);

		length_field = TRB_LEN(trb_buff_len) |
			TRB_TD_SIZE(remainder) |
			TRB_INTR_TARGET(0);

		queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt,
				lower_32_bits(send_addr),
				upper_32_bits(send_addr),
				length_field,
				field);

		addr += trb_buff_len;
		sent_len = trb_buff_len;

		while (sg && sent_len >= block_len) {
			/* New sg entry */
			--num_sgs;
			sent_len -= block_len;
			sg = sg_next(sg);
			if (num_sgs != 0 && sg) {
				block_len = sg_dma_len(sg);
				addr = (u64) sg_dma_address(sg);
				addr += sent_len;
			}
		}
		block_len -= sent_len;
		send_addr = addr;
	}

	if (need_zero_pkt) {
		ret = prepare_transfer(xhci, xhci->devs[slot_id],
				       ep_index, urb->stream_id,
				       1, urb, 1, mem_flags);
		urb_priv->td[1].last_trb = ring->enqueue;
		field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
		queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
	}

	check_trb_math(urb, enqd_len);
	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
}

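/*
 * Example TD layout from the queueing loop above (illustrative
 * addresses): a 3072 byte OUT transfer whose buffer starts 1024 bytes
 * below a 64KB DMA boundary is queued as two normal TRBs:
 *
 *	TRB 1: len 1024, TRB_CHAIN set, cycle bit withheld (first TRB)
 *	TRB 2: len 2048, chain cleared, TRB_IOC set, TD size 0
 *
 * giveback_first_trb() then flips TRB 1's cycle bit and rings the
 * doorbell, handing the whole TD to the hardware in one step.
 */
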
/* Caller must have locked xhci->lock */
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	int num_trbs;
	int ret;
	struct usb_ctrlrequest *setup;
	struct xhci_generic_trb *start_trb;
	int start_cycle;
	u32 field;
	struct urb_priv *urb_priv;
	struct xhci_td *td;

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	/*
	 * Need to copy setup packet into setup TRB, so we can't use the setup
	 * DMA address.
	 */
	if (!urb->setup_packet)
		return -EINVAL;

	/* 1 TRB for setup, 1 for status */
	num_trbs = 2;
	/*
	 * Don't need to check if we need additional event data and normal TRBs,
	 * since data in control transfers will never get bigger than 16MB.
	 * XXX: can we get a buffer that crosses 64KB boundaries?
	 */
	if (urb->transfer_buffer_length > 0)
		num_trbs++;
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (ret < 0)
		return ret;

	urb_priv = urb->hcpriv;
	td = &urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs. The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	/* Queue setup TRB - see section 6.4.1.2.1 */
	/* FIXME better way to translate setup_packet into two u32 fields? */
	setup = (struct usb_ctrlrequest *) urb->setup_packet;
	field = 0;
	field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
	if (start_cycle == 0)
		field |= 0x1;

	/* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
	if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) {
		if (urb->transfer_buffer_length > 0) {
			if (setup->bRequestType & USB_DIR_IN)
				field |= TRB_TX_TYPE(TRB_DATA_IN);
			else
				field |= TRB_TX_TYPE(TRB_DATA_OUT);
		}
	}

	queue_trb(xhci, ep_ring, true,
		  setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
		  le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
		  TRB_LEN(8) | TRB_INTR_TARGET(0),
		  /* Immediate data in pointer */
		  field);

	/* If there's data, queue data TRBs */
	/* Only set interrupt on short packet for IN endpoints */
	if (usb_urb_dir_in(urb))
		field = TRB_ISP | TRB_TYPE(TRB_DATA);
	else
		field = TRB_TYPE(TRB_DATA);

	if (urb->transfer_buffer_length > 0) {
		u32 length_field, remainder;
		u64 addr;

		if (xhci_urb_suitable_for_idt(urb)) {
			memcpy(&addr, urb->transfer_buffer,
			       urb->transfer_buffer_length);
			le64_to_cpus(&addr);
			field |= TRB_IDT;
		} else {
			addr = (u64) urb->transfer_dma;
		}

		remainder = xhci_td_remainder(xhci, 0,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length,
				urb, 1);
		length_field = TRB_LEN(urb->transfer_buffer_length) |
				TRB_TD_SIZE(remainder) |
				TRB_INTR_TARGET(0);
		if (setup->bRequestType & USB_DIR_IN)
			field |= TRB_DIR_IN;
		queue_trb(xhci, ep_ring, true,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				field | ep_ring->cycle_state);
	}

	/* Save the DMA address of the last TRB in the TD */
	td->last_trb = ep_ring->enqueue;

	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
	/* If the device sent data, the status stage is an OUT transfer */
	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
		field = 0;
	else
		field = TRB_DIR_IN;
	queue_trb(xhci, ep_ring, false,
			0,
			0,
			TRB_INTR_TARGET(0),
			/* Event on completion */
			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);

	giveback_first_trb(xhci, slot_id, ep_index, 0,
			start_cycle, start_trb);
	return 0;
}

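/*
 * Example control TD (illustrative): an 18 byte GET_DESCRIPTOR request
 * is queued as three TRBs.  The setup TRB carries the 8 setup bytes as
 * immediate data (TRB_IDT) and, on hosts implementing the Transfer Type
 * field, TRB_TX_TYPE(TRB_DATA_IN).  The data TRB is not eligible for
 * immediate data (it is an IN stage), so it carries the buffer's DMA
 * address with TRB_DIR_IN | TRB_ISP.  The status TRB runs in the
 * opposite direction (OUT here) with TRB_IOC set.  Only the setup TRB's
 * cycle bit is withheld until giveback_first_trb().
 */
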
/*
 * The transfer burst count field of the isochronous TRB defines the number of
 * bursts that are required to move all packets in this TD. Only SuperSpeed
 * devices can burst up to (bMaxBurst + 1) packets per service interval.
 * This field is zero based, meaning a value of zero in the field means one
 * burst. Basically, for everything but SuperSpeed devices, this field will be
 * zero. Only xHCI 1.0 and newer host controllers support this field.
 */
static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;

	if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER)
		return 0;

	max_burst = urb->ep->ss_ep_comp.bMaxBurst;
	return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
}

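/*
 * Worked example (illustrative): a SuperSpeed isoc TD of 6 packets on an
 * endpoint with bMaxBurst = 1 (2 packets per burst) needs
 * DIV_ROUND_UP(6, 2) = 3 bursts, so the zero-based TBC field is 2.
 */
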
/*
 * Returns the number of packets in the last "burst" of packets. This field is
 * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so
 * the last burst packet count is equal to the total number of packets in the
 * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst
 * must contain (bMaxBurst + 1) packets, but the last burst can contain
 * 1 to (bMaxBurst + 1) packets.
 */
static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;
	unsigned int residue;

	if (xhci->hci_version < 0x100)
		return 0;

	if (urb->dev->speed >= USB_SPEED_SUPER) {
		/* bMaxBurst is zero based: 0 means 1 packet per burst */
		max_burst = urb->ep->ss_ep_comp.bMaxBurst;
		residue = total_packet_count % (max_burst + 1);
		/* If residue is zero, the last burst contains (max_burst + 1)
		 * packets, but the TLBPC field is zero-based.
		 */
		if (residue == 0)
			return max_burst;
		return residue - 1;
	}
	if (total_packet_count == 0)
		return 0;
	return total_packet_count - 1;
}

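/*
 * Worked example (illustrative): continuing the 6-packet, bMaxBurst = 1
 * TD above, residue = 6 % 2 = 0, so the last burst holds a full 2
 * packets and the zero-based TLBPC field is max_burst = 1.  For a
 * 5-packet TD, residue = 1 and TLBPC = 0 (one packet in the last burst).
 */
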
/*
 * Calculates the Frame ID field of the isochronous TRB, which identifies
 * the target frame that the Interval associated with this Isochronous
 * Transfer Descriptor will start on. Refer to 4.11.2.5 in the 1.1 spec.
 *
 * Returns actual frame id on success, negative value on error.
 */
static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
		struct urb *urb, int index)
{
	int start_frame, ist, ret = 0;
	int start_frame_id, end_frame_id, current_frame_id;

	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		start_frame = urb->start_frame + index * urb->interval;
	else
		start_frame = (urb->start_frame + index * urb->interval) >> 3;

	/* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
	 *
	 * If bit [3] of IST is cleared to '0', software can add a TRB no
	 * later than IST[2:0] Microframes before that TRB is scheduled to
	 * be executed.
	 * If bit [3] of IST is set to '1', software can add a TRB no later
	 * than IST[2:0] Frames before that TRB is scheduled to be executed.
	 */
	ist = HCS_IST(xhci->hcs_params2) & 0x7;
	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
		ist <<= 3;

	/* Software shall not schedule an Isoch TD with a Frame ID value that
	 * is less than the Start Frame ID or greater than the End Frame ID,
	 * where:
	 *
	 * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
	 * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
	 *
	 * Both the End Frame ID and Start Frame ID values are calculated
	 * in microframes. When software determines the valid Frame ID value,
	 * the End Frame ID value should be rounded down to the nearest Frame
	 * boundary, and the Start Frame ID value should be rounded up to the
	 * nearest Frame boundary.
	 */
	current_frame_id = readl(&xhci->run_regs->microframe_index);
	start_frame_id = roundup(current_frame_id + ist + 1, 8);
	end_frame_id = rounddown(current_frame_id + 895 * 8, 8);

	start_frame &= 0x7ff;
	start_frame_id = (start_frame_id >> 3) & 0x7ff;
	end_frame_id = (end_frame_id >> 3) & 0x7ff;

	xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
		 __func__, index, readl(&xhci->run_regs->microframe_index),
		 start_frame_id, end_frame_id, start_frame);

	if (start_frame_id < end_frame_id) {
		if (start_frame > end_frame_id ||
				start_frame < start_frame_id)
			ret = -EINVAL;
	} else if (start_frame_id > end_frame_id) {
		if ((start_frame > end_frame_id &&
				start_frame < start_frame_id))
			ret = -EINVAL;
	} else {
		ret = -EINVAL;
	}

	if (index == 0) {
		if (ret == -EINVAL || start_frame == start_frame_id) {
			start_frame = start_frame_id + 1;
			if (urb->dev->speed == USB_SPEED_LOW ||
					urb->dev->speed == USB_SPEED_FULL)
				urb->start_frame = start_frame;
			else
				urb->start_frame = start_frame << 3;
			ret = 0;
		}
	}

	if (ret) {
		xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
				start_frame, current_frame_id, index,
				start_frame_id, end_frame_id);
		xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
		return ret;
	}

	return start_frame;
}

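/*
 * Worked example of the window check above (illustrative register
 * values): with MFINDEX = 256 and IST = 8 microframes, the valid window
 * is start_frame_id = roundup(256 + 8 + 1, 8) >> 3 = 34 through
 * end_frame_id = rounddown(256 + 895 * 8, 8) >> 3 = 927.  A computed
 * start_frame inside [34, 927] is returned; outside it, the first TD of
 * an URB is re-based to start_frame_id + 1, and later TDs fall back to
 * the SIA bit.
 */
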
/* This is for isoc transfer */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	int num_tds, trbs_per_td;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	int start_cycle;
	u32 field, length_field;
	int running_total, trb_buff_len, td_len, td_remain_len, ret;
	u64 start_addr, addr;
	int i, j;
	bool more_trbs_coming;
	struct xhci_virt_ep *xep;
	int frame_id;

	xep = &xhci->devs[slot_id]->eps[ep_index];
	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;

	num_tds = urb->number_of_packets;
	if (num_tds < 1) {
		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
		return -EINVAL;
	}
	start_addr = (u64) urb->transfer_dma;
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	urb_priv = urb->hcpriv;
	/* Queue the TRBs for each TD, even if they are zero-length */
	for (i = 0; i < num_tds; i++) {
		unsigned int total_pkt_count, max_pkt;
		unsigned int burst_count, last_burst_pkt_count;
		u32 sia_frame_id;

		first_trb = true;
		running_total = 0;
		addr = start_addr + urb->iso_frame_desc[i].offset;
		td_len = urb->iso_frame_desc[i].length;
		td_remain_len = td_len;
		max_pkt = usb_endpoint_maxp(&urb->ep->desc);
		total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);

		/* A zero-length transfer still involves at least one packet. */
		if (total_pkt_count == 0)
			total_pkt_count++;
		burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count);
		last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci,
							urb, total_pkt_count);

		trbs_per_td = count_isoc_trbs_needed(urb, i);

		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
				urb->stream_id, trbs_per_td, urb, i, mem_flags);
		if (ret < 0) {
			if (i == 0)
				return ret;
			goto cleanup;
		}
		td = &urb_priv->td[i];

		/* use SIA as default, if frame id is used overwrite it */
		sia_frame_id = TRB_SIA;
		if (!(urb->transfer_flags & URB_ISO_ASAP) &&
		    HCC_CFC(xhci->hcc_params)) {
			frame_id = xhci_get_isoc_frame_id(xhci, urb, i);
			if (frame_id >= 0)
				sia_frame_id = TRB_FRAME_ID(frame_id);
		}
		/*
		 * Set isoc specific data for the first TRB in a TD.
		 * Prevent HW from getting the TRBs by keeping the cycle state
		 * inverted in the first TD's isoc TRB.
		 */
		field = TRB_TYPE(TRB_ISOC) |
			TRB_TLBPC(last_burst_pkt_count) |
			sia_frame_id |
			(i ? ep_ring->cycle_state : !start_cycle);

		/* xhci 1.1 with ETE uses TD_Size field for TBC, old is Rsvdz */
		if (!xep->use_extended_tbc)
			field |= TRB_TBC(burst_count);

		/* fill the rest of the TRB fields, and remaining normal TRBs */
		for (j = 0; j < trbs_per_td; j++) {
			u32 remainder = 0;

			/* only first TRB is isoc, overwrite otherwise */
			if (!first_trb)
				field = TRB_TYPE(TRB_NORMAL) |
					ep_ring->cycle_state;

			/* Only set interrupt on short packet for IN EPs */
			if (usb_urb_dir_in(urb))
				field |= TRB_ISP;

			/* Set the chain bit for all except the last TRB */
			if (j < trbs_per_td - 1) {
				more_trbs_coming = true;
				field |= TRB_CHAIN;
			} else {
				more_trbs_coming = false;
				td->last_trb = ep_ring->enqueue;
				field |= TRB_IOC;
				/* set BEI, except for the last TD */
				if (xhci->hci_version >= 0x100 &&
				    !(xhci->quirks & XHCI_AVOID_BEI) &&
				    i < num_tds - 1)
					field |= TRB_BEI;
			}
			/* Calculate TRB length */
			trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
			if (trb_buff_len > td_remain_len)
				trb_buff_len = td_remain_len;

			/* Set the TRB length, TD size, & interrupter fields. */
			remainder = xhci_td_remainder(xhci, running_total,
						      trb_buff_len, td_len,
						      urb, more_trbs_coming);

			length_field = TRB_LEN(trb_buff_len) |
				TRB_INTR_TARGET(0);

			/* xhci 1.1 with ETE uses TD Size field for TBC */
			if (first_trb && xep->use_extended_tbc)
				length_field |= TRB_TD_SIZE_TBC(burst_count);
			else
				length_field |= TRB_TD_SIZE(remainder);
			first_trb = false;

			queue_trb(xhci, ep_ring, more_trbs_coming,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				field);
			running_total += trb_buff_len;

			addr += trb_buff_len;
			td_remain_len -= trb_buff_len;
		}

		/* Check TD length */
		if (running_total != td_len) {
			xhci_err(xhci, "ISOC TD length mismatch\n");
			ret = -EINVAL;
			goto cleanup;
		}
	}

	/* store the next frame id */
	if (HCC_CFC(xhci->hcc_params))
		xep->next_frame_id = urb->start_frame + num_tds * urb->interval;

	if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
		if (xhci->quirks & XHCI_AMD_PLL_FIX)
			usb_amd_quirk_pll_disable();
	}
	xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;

	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
cleanup:
	/* Clean up a partially enqueued isoc transfer. */

	for (i--; i >= 0; i--)
		list_del_init(&urb_priv->td[i].td_list);

	/* Use the first TD as a temporary variable to turn the TDs we've queued
	 * into No-ops with a software-owned cycle bit. That way the hardware
	 * won't accidentally start executing bogus TDs when we partially
	 * overwrite them. td->first_trb and td->start_seg are already set.
	 */
	urb_priv->td[0].last_trb = ep_ring->enqueue;
	/* Every TRB except the first & last will have its cycle bit flipped. */
	td_to_noop(xhci, ep_ring, &urb_priv->td[0], true);

	/* Reset the ring enqueue back to the first TRB and its cycle bit. */
	ep_ring->enqueue = urb_priv->td[0].first_trb;
	ep_ring->enq_seg = urb_priv->td[0].start_seg;
	ep_ring->cycle_state = start_cycle;
	ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
	return ret;
}

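/*
 * Example first-TRB field composition (illustrative, for a host without
 * the extended TBC/ETE capability): the 6-packet TD from the burst
 * examples above (TBC = 2, TLBPC = 1), queued as the URB's first TD with
 * a valid frame ID of 100, gets
 *
 *	field = TRB_TYPE(TRB_ISOC) | TRB_TBC(2) | TRB_TLBPC(1) |
 *		TRB_FRAME_ID(100) | !start_cycle;
 *
 * The deliberately inverted cycle bit keeps the hardware off the TD
 * until giveback_first_trb() flips it.
 */
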
/*
 * Check the transfer ring to guarantee there is enough room for the urb,
 * and update the ISO URB's start_frame and interval.
 * The interval is updated as xhci_queue_intr_tx does. The xhci frame_index
 * is used to update urb->start_frame if URB_ISO_ASAP is set in
 * transfer_flags or if Contiguous Frame ID is not supported by the HC.
 */
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx;
	int start_frame;
	int num_tds, num_trbs, i;
	int ret;
	struct xhci_virt_ep *xep;
	int ist;

	xdev = xhci->devs[slot_id];
	xep = &xhci->devs[slot_id]->eps[ep_index];
	ep_ring = xdev->eps[ep_index].ring;
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	num_trbs = 0;
	num_tds = urb->number_of_packets;
	for (i = 0; i < num_tds; i++)
		num_trbs += count_isoc_trbs_needed(urb, i);

	/* Check the ring to guarantee there is enough room for the whole urb.
	 * Do not insert any td of the urb to the ring if the check failed.
	 */
	ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
			   num_trbs, mem_flags);
	if (ret)
		return ret;

	/*
	 * Check interval value. This should be done before we start to
	 * calculate the start frame value.
	 */
	check_interval(xhci, urb, ep_ctx);

	/* Calculate the start frame and put it in urb->start_frame. */
	if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
		if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING) {
			urb->start_frame = xep->next_frame_id;
			goto skip_start_over;
		}
	}

	start_frame = readl(&xhci->run_regs->microframe_index);
	start_frame &= 0x3fff;
	/*
	 * Round up to the next frame and consider the time before the trb
	 * really gets scheduled by hardware.
	 */
	ist = HCS_IST(xhci->hcs_params2) & 0x7;
	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
		ist <<= 3;
	start_frame += ist + XHCI_CFC_DELAY;
	start_frame = roundup(start_frame, 8);

	/*
	 * Round up to the next ESIT (Endpoint Service Interval Time) if ESIT
	 * is greater than 8 microframes.
	 */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL) {
		start_frame = roundup(start_frame, urb->interval << 3);
		urb->start_frame = start_frame >> 3;
	} else {
		start_frame = roundup(start_frame, urb->interval);
		urb->start_frame = start_frame;
	}

skip_start_over:
	ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;

	return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
}

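/*
 * Worked example of the start_frame math above (illustrative values,
 * and assuming XHCI_CFC_DELAY is 8 microframes for this illustration):
 * with MFINDEX = 1000, IST = 8 and a high-speed endpoint whose interval
 * is 16 microframes, start_frame = roundup(roundup(1000 + 8 + 8, 8), 16)
 * = 1024, i.e. the first TD is scheduled 24 microframes in the future,
 * on an ESIT boundary.
 */
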
/**** Command Ring Operations ****/

/* Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 * Also check that there's room reserved for commands that must not fail.
 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
 * then only check for the number of reserved spots.
 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
 * because the command event handler may want to resubmit a failed command.
 */
static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
			 u32 field1, u32 field2,
			 u32 field3, u32 field4, bool command_must_succeed)
{
	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
	int ret;

	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
		(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
		return -ESHUTDOWN;
	}

	if (!command_must_succeed)
		reserved_trbs++;

	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
			reserved_trbs, GFP_ATOMIC);
	if (ret < 0) {
		xhci_err(xhci, "ERR: No room for command on command ring\n");
		if (command_must_succeed)
			xhci_err(xhci, "ERR: Reserved TRB counting for "
					"unfailable commands failed.\n");
		return ret;
	}

	cmd->command_trb = xhci->cmd_ring->enqueue;

	/* if there are no other commands queued we start the timeout timer */
	if (list_empty(&xhci->cmd_list)) {
		xhci->current_cmd = cmd;
		xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
	}

	list_add_tail(&cmd->cmd_list, &xhci->cmd_list);

	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
			field4 | xhci->cmd_ring->cycle_state);
	return 0;
}

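/*
 * Typical call pattern for the wrappers below (a minimal sketch, not a
 * verbatim caller from this driver; real callers such as xhci_alloc_dev()
 * add locking and completion handling on top of this):
 *
 *	struct xhci_command *cmd;
 *	int ret;
 *
 *	cmd = xhci_alloc_command(xhci, true, GFP_ATOMIC);
 *	if (!cmd)
 *		return -ENOMEM;
 *	ret = xhci_queue_slot_control(xhci, cmd, TRB_ENABLE_SLOT, 0);
 *	if (ret) {
 *		xhci_free_command(xhci, cmd);
 *		return ret;
 *	}
 *	xhci_ring_cmd_db(xhci);
 */
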
/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}

/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
		dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
			| (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
}

int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	return queue_command(xhci, cmd, field1, field2, field3, field4, false);
}

/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 slot_id)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
		struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
		dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/*
 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
 * activity on an endpoint that is about to be suspended.
 */
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
			     int slot_id, unsigned int ep_index, int suspend)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_STOP_RING);
	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);

	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type | trb_suspend, false);
}

/* Set Transfer Ring Dequeue Pointer command */
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_dequeue_state *deq_state)
{
	dma_addr_t addr;
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 trb_stream_id = STREAM_ID_FOR_TRB(deq_state->stream_id);
	u32 trb_sct = 0;
	u32 type = TRB_TYPE(TRB_SET_DEQ);
	struct xhci_virt_ep *ep;
	struct xhci_command *cmd;
	int ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
		"Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), new deq ptr = %p (0x%llx dma), new cycle = %u",
		deq_state->new_deq_seg,
		(unsigned long long)deq_state->new_deq_seg->dma,
		deq_state->new_deq_ptr,
		(unsigned long long)xhci_trb_virt_to_dma(
			deq_state->new_deq_seg, deq_state->new_deq_ptr),
		deq_state->new_cycle_state);

	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
				    deq_state->new_deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
			  deq_state->new_deq_seg, deq_state->new_deq_ptr);
		return;
	}
	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & SET_DEQ_PENDING)) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
		return;
	}

	/* This function gets called from contexts where it cannot sleep */
	cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC);
	if (!cmd)
		return;

	ep->queued_deq_seg = deq_state->new_deq_seg;
	ep->queued_deq_ptr = deq_state->new_deq_ptr;
	if (deq_state->stream_id)
		trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
	ret = queue_command(xhci, cmd,
		lower_32_bits(addr) | trb_sct | deq_state->new_cycle_state,
		upper_32_bits(addr), trb_stream_id,
		trb_slot_id | trb_ep_index | type, false);
	if (ret < 0) {
		xhci_free_command(xhci, cmd);
		return;
	}

	/* Stop the TD queueing code from ringing the doorbell until
	 * this command completes. The HC won't set the dequeue pointer
	 * if the ring is running, and ringing the doorbell starts the
	 * ring running.
	 */
	ep->ep_state |= SET_DEQ_PENDING;
}

int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
			int slot_id, unsigned int ep_index,
			enum xhci_ep_reset_type reset_type)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_RESET_EP);

	if (reset_type == EP_SOFT_RESET)
		type |= TRB_TSP;

	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type, false);
}