// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue. This means there will always be at
 *    least one free TRB in the ring. This is useful if you want to turn that
 *    into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB. If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules). You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if the ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update the enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify the consumer. If SW is the producer, it rings the doorbell for
 *    command and endpoint rings. HC is the producer for the event ring, and
 *    it generates an interrupt according to interrupt moderation rules.
 *
 * Consumer rules:
 * 1. Check if the TRB belongs to you. If the cycle bit == your ring cycle
 *    state, the TRB is owned by the consumer. (See the illustrative sketch
 *    after the includes below.)
 * 2. Update the dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer. SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer. HC is the consumer for the
 *    command and endpoint rings; it generates events on the event ring for
 *    these.
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-mtk.h"

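/*
 * Illustrative sketch, not part of the driver: how a consumer applies the
 * cycle bit rules above to test ownership of the TRB at its dequeue pointer.
 * The helper name is hypothetical; the types and TRB_CYCLE come from xhci.h,
 * and ring->cycle_state is assumed to hold the Consumer Cycle State (0 or 1).
 */
static inline bool example_consumer_owns_trb(struct xhci_ring *ring)
{
        u32 control = le32_to_cpu(ring->dequeue->generic.field[3]);

        /* The TRB belongs to the consumer when its cycle bit matches CCS */
        return (control & TRB_CYCLE) == ring->cycle_state;
}
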
/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
                union xhci_trb *trb)
{
        unsigned long segment_offset;

        if (!seg || !trb || trb < seg->trbs)
                return 0;
        /* offset in TRBs */
        segment_offset = trb - seg->trbs;
        if (segment_offset >= TRBS_PER_SEGMENT)
                return 0;
        return seg->dma + (segment_offset * sizeof(*trb));
}

static bool trb_is_noop(union xhci_trb *trb)
{
        return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
}

static bool trb_is_link(union xhci_trb *trb)
{
        return TRB_TYPE_LINK_LE32(trb->link.control);
}

static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
{
        return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
}

static bool last_trb_on_ring(struct xhci_ring *ring,
                struct xhci_segment *seg, union xhci_trb *trb)
{
        return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
}

static bool link_trb_toggles_cycle(union xhci_trb *trb)
{
        return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

static bool last_td_in_urb(struct xhci_td *td)
{
        struct urb_priv *urb_priv = td->urb->hcpriv;

        return urb_priv->num_tds_done == urb_priv->num_tds;
}

static void inc_td_cnt(struct urb *urb)
{
        struct urb_priv *urb_priv = urb->hcpriv;

        urb_priv->num_tds_done++;
}

static void trb_to_noop(union xhci_trb *trb, u32 noop_type)
{
        if (trb_is_link(trb)) {
                /* unchain chained link TRBs */
                trb->link.control &= cpu_to_le32(~TRB_CHAIN);
        } else {
                trb->generic.field[0] = 0;
                trb->generic.field[1] = 0;
                trb->generic.field[2] = 0;
                /* Preserve only the cycle bit of this TRB */
                trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
                trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
        }
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the
 * next TRB is in a new segment. This does not skip over link TRBs, and it
 * does not affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
                struct xhci_ring *ring,
                struct xhci_segment **seg,
                union xhci_trb **trb)
{
        if (trb_is_link(*trb)) {
                *seg = (*seg)->next;
                *trb = ((*seg)->trbs);
        } else {
                (*trb)++;
        }
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
 */
void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
        /* event ring doesn't have link trbs, check for last trb */
        if (ring->type == TYPE_EVENT) {
                if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
                        ring->dequeue++;
                        goto out;
                }
                if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
                        ring->cycle_state ^= 1;
                ring->deq_seg = ring->deq_seg->next;
                ring->dequeue = ring->deq_seg->trbs;
                goto out;
        }

        /* All other rings have link trbs */
        if (!trb_is_link(ring->dequeue)) {
                ring->dequeue++;
                ring->num_trbs_free++;
        }
        while (trb_is_link(ring->dequeue)) {
                ring->deq_seg = ring->deq_seg->next;
                ring->dequeue = ring->deq_seg->trbs;
        }

out:
        trace_xhci_inc_deq(ring);

        return;
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set. This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 * (A sketch of the producer-side TRB write ordering follows this function.)
 *
 * @more_trbs_coming: Will you enqueue more TRBs before calling
 *                    prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
                        bool more_trbs_coming)
{
        u32 chain;
        union xhci_trb *next;

        chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
        /* If this is not event ring, there is one less usable TRB */
        if (!trb_is_link(ring->enqueue))
                ring->num_trbs_free--;
        next = ++(ring->enqueue);

        /* Update the enqueue pointer further if that was a link TRB */
        while (trb_is_link(next)) {

                /*
                 * If the caller doesn't plan on enqueueing more TDs before
                 * ringing the doorbell, then we don't want to give the link TRB
                 * to the hardware just yet. We'll give the link TRB back in
                 * prepare_ring() just before we enqueue the TD at the top of
                 * the ring.
                 */
                if (!chain && !more_trbs_coming)
                        break;

                /* If we're not dealing with 0.95 hardware or isoc rings on
                 * AMD 0.96 host, carry over the chain bit of the previous TRB
                 * (which may mean the chain bit is cleared).
                 */
                if (!(ring->type == TYPE_ISOC &&
                      (xhci->quirks & XHCI_AMD_0x96_HOST)) &&
                    !xhci_link_trb_quirk(xhci)) {
                        next->link.control &= cpu_to_le32(~TRB_CHAIN);
                        next->link.control |= cpu_to_le32(chain);
                }
                /* Give this link TRB to the hardware */
                wmb();
                next->link.control ^= cpu_to_le32(TRB_CYCLE);

                /* Toggle the cycle bit after the last ring segment. */
                if (link_trb_toggles_cycle(next))
                        ring->cycle_state ^= 1;

                ring->enq_seg = ring->enq_seg->next;
                ring->enqueue = ring->enq_seg->trbs;
                next = ring->enqueue;
        }

        trace_xhci_inc_enq(ring);
}

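/*
 * Illustrative sketch, not part of the driver: the producer-side write
 * ordering implied by the cycle bit rules. A real enqueue helper exists
 * later in this file; this hypothetical variant only shows why the cycle
 * bit is written last, after a write barrier, so the controller never sees
 * a half-initialized TRB that it appears to own.
 */
static inline void example_write_trb(struct xhci_ring *ring, u32 field1,
                                     u32 field2, u32 field3, u32 field4)
{
        struct xhci_generic_trb *trb = &ring->enqueue->generic;

        trb->field[0] = cpu_to_le32(field1);
        trb->field[1] = cpu_to_le32(field2);
        trb->field[2] = cpu_to_le32(field3);
        /* Make sure the rest of the TRB is visible before the cycle bit */
        wmb();
        trb->field[3] = cpu_to_le32(field4 | ring->cycle_state);
}
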
/*
 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 * enqueue pointer will not advance into dequeue segment. See rules above.
 */
static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
                unsigned int num_trbs)
{
        int num_trbs_in_deq_seg;

        if (ring->num_trbs_free < num_trbs)
                return 0;

        if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
                num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
                if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
                        return 0;
        }

        return 1;
}

/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
        if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
                return;

        xhci_dbg(xhci, "// Ding dong!\n");
        writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
        /* Flush PCI posted writes */
        readl(&xhci->dba->doorbell[0]);
}

static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay)
{
        return mod_delayed_work(system_wq, &xhci->cmd_timer, delay);
}

static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
{
        return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command,
                                        cmd_list);
}

/*
 * Turn all commands on command ring with status set to "aborted" to no-op trbs.
 * If there are other commands waiting then restart the ring and kick the timer.
 * This must be called with command ring stopped and xhci->lock held.
 */
static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
                                         struct xhci_command *cur_cmd)
{
        struct xhci_command *i_cmd;

        /* Turn all aborted commands in list to no-ops, then restart */
        list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) {

                if (i_cmd->status != COMP_COMMAND_ABORTED)
                        continue;

                i_cmd->status = COMP_COMMAND_RING_STOPPED;

                xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
                         i_cmd->command_trb);

                trb_to_noop(i_cmd->command_trb, TRB_CMD_NOOP);

                /*
                 * caller waiting for completion is called when command
                 * completion event is received for these no-op commands
                 */
        }

        xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

        /* ring command ring doorbell to restart the command ring */
        if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
            !(xhci->xhc_state & XHCI_STATE_DYING)) {
                xhci->current_cmd = cur_cmd;
                xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
                xhci_ring_cmd_db(xhci);
        }
}

/* Must be called with xhci->lock held; releases and re-acquires the lock */
static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
{
        u64 temp_64;
        int ret;

        xhci_dbg(xhci, "Abort command ring\n");

        reinit_completion(&xhci->cmd_ring_stop_completion);

        temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
        xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
                        &xhci->op_regs->cmd_ring);

        /* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the
         * completion of the Command Abort operation. If CRR is not negated in 5
         * seconds then driver handles it as if host died (-ENODEV).
         * In the future we should distinguish between -ENODEV and -ETIMEDOUT
         * and try to recover a -ETIMEDOUT with a host controller reset.
         */
        ret = xhci_handshake(&xhci->op_regs->cmd_ring,
                        CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
        if (ret < 0) {
                xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret);
                xhci_halt(xhci);
                xhci_hc_died(xhci);
                return ret;
        }
        /*
         * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
         * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
         * but the completion event is never sent. Wait 2 secs (arbitrary
         * number) to handle those cases after negation of CMD_RING_RUNNING.
         */
        spin_unlock_irqrestore(&xhci->lock, flags);
        ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion,
                                          msecs_to_jiffies(2000));
        spin_lock_irqsave(&xhci->lock, flags);
        if (!ret) {
                xhci_dbg(xhci, "No stop event for abort, ring start fail?\n");
                xhci_cleanup_command_queue(xhci);
        } else {
                xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci));
        }
        return 0;
}

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
                unsigned int slot_id,
                unsigned int ep_index,
                unsigned int stream_id)
{
        __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
        struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
        unsigned int ep_state = ep->ep_state;

        /* Don't ring the doorbell for this endpoint if there are pending
         * cancellations because we don't want to interrupt processing.
         * We don't want to restart any stream rings if there's a set dequeue
         * pointer command pending because the device can choose to start any
         * stream once the endpoint is on the HW schedule.
         */
        if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) ||
            (ep_state & EP_HALTED))
                return;
        writel(DB_VALUE(ep_index, stream_id), db_addr);
        /* The CPU has better things to do at this point than wait for a
         * write-posting flush. It'll get there soon enough.
         */
}

/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
                unsigned int slot_id,
                unsigned int ep_index)
{
        unsigned int stream_id;
        struct xhci_virt_ep *ep;

        ep = &xhci->devs[slot_id]->eps[ep_index];

        /* A ring has pending URBs if its TD list is not empty */
        if (!(ep->ep_state & EP_HAS_STREAMS)) {
                if (ep->ring && !(list_empty(&ep->ring->td_list)))
                        xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
                return;
        }

        for (stream_id = 1; stream_id < ep->stream_info->num_streams;
                        stream_id++) {
                struct xhci_stream_info *stream_info = ep->stream_info;
                if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
                        xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
                                                stream_id);
        }
}

/* Get the right ring for the given slot_id, ep_index and stream_id.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
                unsigned int slot_id, unsigned int ep_index,
                unsigned int stream_id)
{
        struct xhci_virt_ep *ep;

        ep = &xhci->devs[slot_id]->eps[ep_index];
        /* Common case: no streams */
        if (!(ep->ep_state & EP_HAS_STREAMS))
                return ep->ring;

        if (stream_id == 0) {
                xhci_warn(xhci,
                                "WARN: Slot ID %u, ep index %u has streams, "
                                "but URB has no stream ID.\n",
                                slot_id, ep_index);
                return NULL;
        }

        if (stream_id < ep->stream_info->num_streams)
                return ep->stream_info->stream_rings[stream_id];

        xhci_warn(xhci,
                        "WARN: Slot ID %u, ep index %u has "
                        "stream IDs 1 to %u allocated, "
                        "but stream ID %u is requested.\n",
                        slot_id, ep_index,
                        ep->stream_info->num_streams - 1,
                        stream_id);
        return NULL;
}

/*
 * Get the hw dequeue pointer xHC stopped on, either directly from the
 * endpoint context, or if streams are in use from the stream context.
 * The returned hw_dequeue contains the lowest four bits with cycle state
 * and possible stream context type.
 */
static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev,
                           unsigned int ep_index, unsigned int stream_id)
{
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_stream_ctx *st_ctx;
        struct xhci_virt_ep *ep;

        ep = &vdev->eps[ep_index];

        if (ep->ep_state & EP_HAS_STREAMS) {
                st_ctx = &ep->stream_info->stream_ctx_array[stream_id];
                return le64_to_cpu(st_ctx->stream_ring);
        }
        ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
        return le64_to_cpu(ep_ctx->deq);
}

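/*
 * Illustrative sketch, not part of the driver: how callers below decompose
 * the raw hw_dequeue value. TRBs are 16-byte aligned, so the low four bits
 * are free to carry the cycle state (bit 0) and, for stream contexts, the
 * stream context type. The helper name and out-parameters are hypothetical.
 */
static inline void example_split_hw_deq(u64 hw_dequeue, dma_addr_t *deq_dma,
                                        u32 *cycle_state)
{
        *deq_dma = (dma_addr_t)(hw_dequeue & ~0xfULL);  /* aligned TRB address */
        *cycle_state = hw_dequeue & 0x1;                /* CCS bit */
}
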
/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, stream id, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 * - First we update our new ring state to be the same as when the xHC stopped.
 * - Then we traverse the ring to find the segment that contains
 *   the last TRB in the TD. We toggle the xHC's new cycle state when we pass
 *   any link TRBs with the toggle cycle bit set.
 * - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *   if we've moved it past a link TRB with the toggle cycle bit set.
 *
 * Some of the uses of xhci_generic_trb are grotty, but if they're done
 * with correct __le32 accesses they should work fine. Only users of this are
 * in here.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
                unsigned int slot_id, unsigned int ep_index,
                unsigned int stream_id, struct xhci_td *cur_td,
                struct xhci_dequeue_state *state)
{
        struct xhci_virt_device *dev = xhci->devs[slot_id];
        struct xhci_virt_ep *ep = &dev->eps[ep_index];
        struct xhci_ring *ep_ring;
        struct xhci_segment *new_seg;
        union xhci_trb *new_deq;
        dma_addr_t addr;
        u64 hw_dequeue;
        bool cycle_found = false;
        bool td_last_trb_found = false;

        ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
                        ep_index, stream_id);
        if (!ep_ring) {
                xhci_warn(xhci, "WARN can't find new dequeue state "
                                "for invalid stream ID %u.\n",
                                stream_id);
                return;
        }
        /* Dig out the cycle state saved by the xHC during the stop ep cmd */
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "Finding endpoint context");

        hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
        new_seg = ep_ring->deq_seg;
        new_deq = ep_ring->dequeue;
        state->new_cycle_state = hw_dequeue & 0x1;
        state->stream_id = stream_id;

        /*
         * We want to find the pointer, segment and cycle state of the new trb
         * (the one after current TD's last_trb). We know the cycle state at
         * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
         * found.
         */
        do {
                if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
                    == (dma_addr_t)(hw_dequeue & ~0xf)) {
                        cycle_found = true;
                        if (td_last_trb_found)
                                break;
                }
                if (new_deq == cur_td->last_trb)
                        td_last_trb_found = true;

                if (cycle_found && trb_is_link(new_deq) &&
                    link_trb_toggles_cycle(new_deq))
                        state->new_cycle_state ^= 0x1;

                next_trb(xhci, ep_ring, &new_seg, &new_deq);

                /* Search wrapped around, bail out */
                if (new_deq == ep->ring->dequeue) {
                        xhci_err(xhci, "Error: Failed finding new dequeue state\n");
                        state->new_deq_seg = NULL;
                        state->new_deq_ptr = NULL;
                        return;
                }

        } while (!cycle_found || !td_last_trb_found);

        state->new_deq_seg = new_seg;
        state->new_deq_ptr = new_deq;

        /* Don't update the ring cycle state for the producer (us). */
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "Cycle state = 0x%x", state->new_cycle_state);

        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "New dequeue segment = %p (virtual)",
                        state->new_deq_seg);
        addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "New dequeue pointer = 0x%llx (DMA)",
                        (unsigned long long) addr);
}

/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
 */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
                       struct xhci_td *td, bool flip_cycle)
{
        struct xhci_segment *seg = td->start_seg;
        union xhci_trb *trb = td->first_trb;

        while (1) {
                trb_to_noop(trb, TRB_TR_NOOP);

                /* flip cycle if asked to */
                if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
                        trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);

                if (trb == td->last_trb)
                        break;

                next_trb(xhci, ep_ring, &seg, &trb);
        }
}

static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
                struct xhci_virt_ep *ep)
{
        ep->ep_state &= ~EP_STOP_CMD_PENDING;
        /* Can't del_timer_sync in interrupt */
        del_timer(&ep->stop_cmd_timer);
}

/*
 * Must be called with xhci->lock held in interrupt context,
 * releases and re-acquires xhci->lock
 */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
                                     struct xhci_td *cur_td, int status)
{
        struct urb *urb = cur_td->urb;
        struct urb_priv *urb_priv = urb->hcpriv;
        struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);

        if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
                xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
                if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
                        if (xhci->quirks & XHCI_AMD_PLL_FIX)
                                usb_amd_quirk_pll_enable();
                }
        }
        xhci_urb_free_priv(urb_priv);
        usb_hcd_unlink_urb_from_ep(hcd, urb);
        spin_unlock(&xhci->lock);
        trace_xhci_urb_giveback(urb);
        usb_hcd_giveback_urb(hcd, urb, status);
        spin_lock(&xhci->lock);
}

static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
                struct xhci_ring *ring, struct xhci_td *td)
{
        struct device *dev = xhci_to_hcd(xhci)->self.controller;
        struct xhci_segment *seg = td->bounce_seg;
        struct urb *urb = td->urb;

        if (!ring || !seg || !urb)
                return;

        if (usb_urb_dir_out(urb)) {
                dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
                                 DMA_TO_DEVICE);
                return;
        }

        /* for IN transfers we need to copy the data from bounce to sg */
        sg_pcopy_from_buffer(urb->sg, urb->num_mapped_sgs, seg->bounce_buf,
                             seg->bounce_len, seg->bounce_offs);
        dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
                         DMA_FROM_DEVICE);
        seg->bounce_len = 0;
        seg->bounce_offs = 0;
}

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring. There are two ways to do that:
 *
 * 1. If the HW was in the middle of processing the TD that needs to be
 *    cancelled, then we must move the ring's dequeue pointer past the last TRB
 *    in the TD with a Set Dequeue Pointer Command.
 * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
 *    bit cleared) so that the HW will skip over them.
 */
static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
                union xhci_trb *trb, struct xhci_event_cmd *event)
{
        unsigned int ep_index;
        struct xhci_ring *ep_ring;
        struct xhci_virt_ep *ep;
        struct xhci_td *cur_td = NULL;
        struct xhci_td *last_unlinked_td;
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_virt_device *vdev;
        u64 hw_deq;
        struct xhci_dequeue_state deq_state;

        if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
                if (!xhci->devs[slot_id])
                        xhci_warn(xhci, "Stop endpoint command "
                                "completion for disabled slot %u\n",
                                slot_id);
                return;
        }

        memset(&deq_state, 0, sizeof(deq_state));
        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));

        vdev = xhci->devs[slot_id];
        ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
        trace_xhci_handle_cmd_stop_ep(ep_ctx);

        ep = &xhci->devs[slot_id]->eps[ep_index];
        last_unlinked_td = list_last_entry(&ep->cancelled_td_list,
                        struct xhci_td, cancelled_td_list);

        if (list_empty(&ep->cancelled_td_list)) {
                xhci_stop_watchdog_timer_in_irq(xhci, ep);
                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
                return;
        }

        /* Fix up the ep ring first, so HW stops executing cancelled TDs.
         * We have the xHCI lock, so nothing can modify this list until we drop
         * it. We're also in the event handler, so we can't get re-interrupted
         * if another Stop Endpoint command completes.
         */
        list_for_each_entry(cur_td, &ep->cancelled_td_list, cancelled_td_list) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                "Removing canceled TD starting at 0x%llx (dma).",
                                (unsigned long long)xhci_trb_virt_to_dma(
                                        cur_td->start_seg, cur_td->first_trb));
                ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
                if (!ep_ring) {
                        /* This shouldn't happen unless a driver is mucking
                         * with the stream ID after submission. This will
                         * leave the TD on the hardware ring, and the hardware
                         * will try to execute it, and may access a buffer
                         * that has already been freed. In the best case, the
                         * hardware will execute it, and the event handler will
                         * ignore the completion event for that TD, since it was
                         * removed from the td_list for that endpoint. In
                         * short, don't muck with the stream ID after
                         * submission.
                         */
                        xhci_warn(xhci, "WARN Cancelled URB %p "
                                        "has invalid stream ID %u.\n",
                                        cur_td->urb,
                                        cur_td->urb->stream_id);
                        goto remove_finished_td;
                }
                /*
                 * If we stopped on the TD we need to cancel, then we have to
                 * move the xHC endpoint ring dequeue pointer past this TD.
                 */
                hw_deq = xhci_get_hw_deq(xhci, vdev, ep_index,
                                         cur_td->urb->stream_id);
                hw_deq &= ~0xf;

                if (trb_in_td(xhci, cur_td->start_seg, cur_td->first_trb,
                              cur_td->last_trb, hw_deq, false)) {
                        xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
                                                    cur_td->urb->stream_id,
                                                    cur_td, &deq_state);
                } else {
                        td_to_noop(xhci, ep_ring, cur_td, false);
                }

remove_finished_td:
                /*
                 * The event handler won't see a completion for this TD anymore,
                 * so remove it from the endpoint ring's TD list. Keep it in
                 * the cancelled TD list for URB completion later.
                 */
                list_del_init(&cur_td->td_list);
        }

        xhci_stop_watchdog_timer_in_irq(xhci, ep);

        /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
        if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
                xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
                                             &deq_state);
                xhci_ring_cmd_db(xhci);
        } else {
                /* Otherwise ring the doorbell(s) to restart queued transfers */
                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
        }

        /*
         * Drop the lock and complete the URBs in the cancelled TD list.
         * New TDs to be cancelled might be added to the end of the list before
         * we can complete all the URBs for the TDs we already unlinked.
         * So stop when we've completed the URB for the last TD we unlinked.
         */
        do {
                cur_td = list_first_entry(&ep->cancelled_td_list,
                                struct xhci_td, cancelled_td_list);
                list_del_init(&cur_td->cancelled_td_list);

                /* Clean up the cancelled URB */
                /* Doesn't matter what we pass for status, since the core will
                 * just overwrite it (because the URB has been unlinked).
                 */
                ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
                xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td);
                inc_td_cnt(cur_td->urb);
                if (last_td_in_urb(cur_td))
                        xhci_giveback_urb_in_irq(xhci, cur_td, 0);

                /* Stop processing the cancelled list if the watchdog timer is
                 * running.
                 */
                if (xhci->xhc_state & XHCI_STATE_DYING)
                        return;
        } while (cur_td != last_unlinked_td);

        /* Return to the event handler with xhci->lock re-acquired */
}

static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
        struct xhci_td *cur_td;
        struct xhci_td *tmp;

        list_for_each_entry_safe(cur_td, tmp, &ring->td_list, td_list) {
                list_del_init(&cur_td->td_list);

                if (!list_empty(&cur_td->cancelled_td_list))
                        list_del_init(&cur_td->cancelled_td_list);

                xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);

                inc_td_cnt(cur_td->urb);
                if (last_td_in_urb(cur_td))
                        xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
        }
}

static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
                int slot_id, int ep_index)
{
        struct xhci_td *cur_td;
        struct xhci_td *tmp;
        struct xhci_virt_ep *ep;
        struct xhci_ring *ring;

        ep = &xhci->devs[slot_id]->eps[ep_index];
        if ((ep->ep_state & EP_HAS_STREAMS) ||
            (ep->ep_state & EP_GETTING_NO_STREAMS)) {
                int stream_id;

                for (stream_id = 1; stream_id < ep->stream_info->num_streams;
                     stream_id++) {
                        ring = ep->stream_info->stream_rings[stream_id];
                        if (!ring)
                                continue;

                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                        "Killing URBs for slot ID %u, ep index %u, stream %u",
                                        slot_id, ep_index, stream_id);
                        xhci_kill_ring_urbs(xhci, ring);
                }
        } else {
                ring = ep->ring;
                if (!ring)
                        return;
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                "Killing URBs for slot ID %u, ep index %u",
                                slot_id, ep_index);
                xhci_kill_ring_urbs(xhci, ring);
        }

        list_for_each_entry_safe(cur_td, tmp, &ep->cancelled_td_list,
                        cancelled_td_list) {
                list_del_init(&cur_td->cancelled_td_list);
                inc_td_cnt(cur_td->urb);

                if (last_td_in_urb(cur_td))
                        xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
        }
}

/*
 * host controller died, register read returns 0xffffffff
 * Complete pending commands, mark them ABORTED.
 * URBs need to be given back as usb core might be waiting with device locks
 * held for the URBs to finish during device disconnect, blocking host remove.
 *
 * Call with xhci->lock held.
 * lock is released and re-acquired while giving back urb.
 */
void xhci_hc_died(struct xhci_hcd *xhci)
{
        int i, j;

        if (xhci->xhc_state & XHCI_STATE_DYING)
                return;

        xhci_err(xhci, "xHCI host controller not responding, assume dead\n");
        xhci->xhc_state |= XHCI_STATE_DYING;

        xhci_cleanup_command_queue(xhci);

        /* return any pending urbs, remove may be waiting for them */
        for (i = 0; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
                if (!xhci->devs[i])
                        continue;
                for (j = 0; j < 31; j++)
                        xhci_kill_endpoint_urbs(xhci, i, j);
        }

        /* inform usb core hc died if PCI remove isn't already handling it */
        if (!(xhci->xhc_state & XHCI_STATE_REMOVING))
                usb_hc_died(xhci_to_hcd(xhci));
}

/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead. The
 * host may still be completing some other events, so we have to be careful to
 * let the event ring handler and the URB dequeueing/enqueueing functions know
 * through xhci->state.
 *
 * The timer may also fire if the host takes a very long time to respond to the
 * command, and the stop endpoint command completion handler cannot delete the
 * timer before the timer function is called. Another endpoint cancellation may
 * sneak in before the timer function can grab the lock, and that may queue
 * another stop endpoint command and add the timer back. So we cannot use a
 * simple flag to say whether there is a pending stop endpoint command for a
 * particular endpoint.
 *
 * Instead we use a combination of that flag and checking if a new timer is
 * pending.
 */
void xhci_stop_endpoint_command_watchdog(struct timer_list *t)
{
        struct xhci_virt_ep *ep = from_timer(ep, t, stop_cmd_timer);
        struct xhci_hcd *xhci = ep->xhci;
        unsigned long flags;

        spin_lock_irqsave(&xhci->lock, flags);

        /* bail out if cmd completed but raced with stop ep watchdog timer. */
        if (!(ep->ep_state & EP_STOP_CMD_PENDING) ||
            timer_pending(&ep->stop_cmd_timer)) {
                spin_unlock_irqrestore(&xhci->lock, flags);
                xhci_dbg(xhci, "Stop EP timer raced with cmd completion, exit");
                return;
        }

        xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
        ep->ep_state &= ~EP_STOP_CMD_PENDING;

        xhci_halt(xhci);

        /*
         * handle a stop endpoint cmd timeout as if host died (-ENODEV).
         * In the future we could distinguish between -ENODEV and -ETIMEDOUT
         * and try to recover a -ETIMEDOUT with a host controller reset
         */
        xhci_hc_died(xhci);

        spin_unlock_irqrestore(&xhci->lock, flags);
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "xHCI host controller is dead.");
}

static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
                struct xhci_virt_device *dev,
                struct xhci_ring *ep_ring,
                unsigned int ep_index)
{
        union xhci_trb *dequeue_temp;
        int num_trbs_free_temp;
        bool revert = false;

        num_trbs_free_temp = ep_ring->num_trbs_free;
        dequeue_temp = ep_ring->dequeue;

        /* If we get two back-to-back stalls, and the first stalled transfer
         * ends just before a link TRB, the dequeue pointer will be left on
         * the link TRB by the code in the while loop. So we have to update
         * the dequeue pointer one segment further, or we'll jump off
         * the segment into la-la-land.
         */
        if (trb_is_link(ep_ring->dequeue)) {
                ep_ring->deq_seg = ep_ring->deq_seg->next;
                ep_ring->dequeue = ep_ring->deq_seg->trbs;
        }

        while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
                /* We have more usable TRBs */
                ep_ring->num_trbs_free++;
                ep_ring->dequeue++;
                if (trb_is_link(ep_ring->dequeue)) {
                        if (ep_ring->dequeue ==
                                        dev->eps[ep_index].queued_deq_ptr)
                                break;
                        ep_ring->deq_seg = ep_ring->deq_seg->next;
                        ep_ring->dequeue = ep_ring->deq_seg->trbs;
                }
                if (ep_ring->dequeue == dequeue_temp) {
                        revert = true;
                        break;
                }
        }

        if (revert) {
                xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
                ep_ring->num_trbs_free = num_trbs_free_temp;
        }
}

/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again. We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
                union xhci_trb *trb, u32 cmd_comp_code)
{
        unsigned int ep_index;
        unsigned int stream_id;
        struct xhci_ring *ep_ring;
        struct xhci_virt_device *dev;
        struct xhci_virt_ep *ep;
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_slot_ctx *slot_ctx;

        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
        stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
        dev = xhci->devs[slot_id];
        ep = &dev->eps[ep_index];

        ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
        if (!ep_ring) {
                xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
                                stream_id);
                /* XXX: Harmless??? */
                goto cleanup;
        }

        ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
        slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
        trace_xhci_handle_cmd_set_deq(slot_ctx);
        trace_xhci_handle_cmd_set_deq_ep(ep_ctx);

        if (cmd_comp_code != COMP_SUCCESS) {
                unsigned int ep_state;
                unsigned int slot_state;

                switch (cmd_comp_code) {
                case COMP_TRB_ERROR:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
                        break;
                case COMP_CONTEXT_STATE_ERROR:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
                        ep_state = GET_EP_CTX_STATE(ep_ctx);
                        slot_state = le32_to_cpu(slot_ctx->dev_state);
                        slot_state = GET_SLOT_STATE(slot_state);
                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                        "Slot state = %u, EP state = %u",
                                        slot_state, ep_state);
                        break;
                case COMP_SLOT_NOT_ENABLED_ERROR:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
                                        slot_id);
                        break;
                default:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
                                        cmd_comp_code);
                        break;
                }
                /* OK what do we do now? The endpoint state is hosed, and we
                 * should never get to this point if the synchronization between
                 * queueing, and endpoint state are correct. This might happen
                 * if the device gets disconnected after we've finished
                 * cancelling URBs, which might not be an error...
                 */
        } else {
                u64 deq;
                /* 4.6.10 deq ptr is written to the stream ctx for streams */
                if (ep->ep_state & EP_HAS_STREAMS) {
                        struct xhci_stream_ctx *ctx =
                                &ep->stream_info->stream_ctx_array[stream_id];
                        deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
                } else {
                        deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
                }
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
                if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
                                         ep->queued_deq_ptr) == deq) {
                        /* Update the ring's dequeue segment and dequeue pointer
                         * to reflect the new position.
                         */
                        update_ring_for_set_deq_completion(xhci, dev,
                                ep_ring, ep_index);
                } else {
                        xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
                        xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
                                  ep->queued_deq_seg, ep->queued_deq_ptr);
                }
        }

cleanup:
        dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
        dev->eps[ep_index].queued_deq_seg = NULL;
        dev->eps[ep_index].queued_deq_ptr = NULL;
        /* Restart any rings with pending URBs */
        ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
                union xhci_trb *trb, u32 cmd_comp_code)
{
        struct xhci_virt_device *vdev;
        struct xhci_ep_ctx *ep_ctx;
        unsigned int ep_index;

        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
        vdev = xhci->devs[slot_id];
        ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
        trace_xhci_handle_cmd_reset_ep(ep_ctx);

        /* This command will only fail if the endpoint wasn't halted,
         * but we don't care.
         */
        xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
                "Ignoring reset ep completion code of %u", cmd_comp_code);

        /* HW with the reset endpoint quirk needs to have a configure endpoint
         * command complete before the endpoint can be used. Queue that here
         * because the HW can't handle two commands being queued in a row.
         */
        if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
                struct xhci_command *command;

                command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
                if (!command)
                        return;

                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                               "Queueing configure endpoint command");
                xhci_queue_configure_endpoint(xhci, command,
                                xhci->devs[slot_id]->in_ctx->dma, slot_id,
                                false);
                xhci_ring_cmd_db(xhci);
        } else {
                /* Clear our internal halted state */
                xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
        }
}

static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
		struct xhci_command *command, u32 cmd_comp_code)
{
	if (cmd_comp_code == COMP_SUCCESS)
		command->slot_id = slot_id;
	else
		command->slot_id = 0;
}

static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_slot_ctx *slot_ctx;

	virt_dev = xhci->devs[slot_id];
	if (!virt_dev)
		return;

	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	trace_xhci_handle_cmd_disable_slot(slot_ctx);

	if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
		/* Delete default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, true);
	xhci_free_virt_device(xhci, slot_id);
}

static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event, u32 cmd_comp_code)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	unsigned int ep_index;
	unsigned int ep_state;
	u32 add_flags, drop_flags;

	/*
	 * Configure endpoint commands can come from the USB core
	 * configuration or alt setting changes, or because the HW
	 * needed an extra configure endpoint command after a reset
	 * endpoint command or streams were being configured.
	 * If the command was for a halted endpoint, the xHCI driver
	 * is not waiting on the configure endpoint command.
	 */
	virt_dev = xhci->devs[slot_id];
	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "Could not get input context, bad type.\n");
		return;
	}

	add_flags = le32_to_cpu(ctrl_ctx->add_flags);
	drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
	/* Input ctx add_flags are the endpoint index plus one */
	ep_index = xhci_last_valid_endpoint(add_flags) - 1;

	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, ep_index);
	trace_xhci_handle_cmd_config_ep(ep_ctx);

	/* A usb_set_interface() call directly after clearing a halted
	 * condition may race on this quirky hardware.  Not worth
	 * worrying about, since this is prototype hardware.  Not sure
	 * if this will work for streams, but streams support was
	 * untested on this prototype.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
			ep_index != (unsigned int) -1 &&
			add_flags - SLOT_FLAG == drop_flags) {
		ep_state = virt_dev->eps[ep_index].ep_state;
		if (!(ep_state & EP_HALTED))
			return;
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Completed config ep cmd - "
				"last ep index = %d, state = %d",
				ep_index, ep_state);
		/* Clear internal halted state and restart ring(s) */
		virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}
	return;
}

static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *vdev;
	struct xhci_slot_ctx *slot_ctx;

	vdev = xhci->devs[slot_id];
	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
	trace_xhci_handle_cmd_addr_dev(slot_ctx);
}

static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event)
{
	struct xhci_virt_device *vdev;
	struct xhci_slot_ctx *slot_ctx;

	vdev = xhci->devs[slot_id];
	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
	trace_xhci_handle_cmd_reset_dev(slot_ctx);

	xhci_dbg(xhci, "Completed reset device command.\n");
	if (!xhci->devs[slot_id])
		xhci_warn(xhci, "Reset device command completion for disabled slot %u\n",
			  slot_id);
}

static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	if (!(xhci->quirks & XHCI_NEC_HOST)) {
		xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n");
		return;
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"NEC firmware version %2x.%02x",
			NEC_FW_MAJOR(le32_to_cpu(event->status)),
			NEC_FW_MINOR(le32_to_cpu(event->status)));
}

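/*
 * Ownership note for the helper below: a command submitted with a completion
 * is owned by its submitter and is only signalled here, while a command
 * without one is "fire and forget" and is freed on completion.
 */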
static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
{
	list_del(&cmd->cmd_list);

	if (cmd->completion) {
		cmd->status = status;
		complete(cmd->completion);
	} else {
		kfree(cmd);
	}
}

void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
{
	struct xhci_command *cur_cmd, *tmp_cmd;
	xhci->current_cmd = NULL;
	list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
		xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED);
}

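/*
 * Command-ring watchdog, run from a delayed work item.  Roughly: if we did
 * not race with a normal completion, mark the current command as aborted,
 * then either abort a running ring, declare the host dead on an all-ones
 * register read, bail out on host removal, or clean up a ring that is
 * already stopped.
 */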
void xhci_handle_command_timeout(struct work_struct *work)
{
	struct xhci_hcd *xhci;
	unsigned long flags;
	u64 hw_ring_state;

	xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer);

	spin_lock_irqsave(&xhci->lock, flags);

	/*
	 * If timeout work is pending, or current_cmd is NULL, it means we
	 * raced with command completion.  Command is handled so just return.
	 */
	if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}
	/* mark this command to be cancelled */
	xhci->current_cmd->status = COMP_COMMAND_ABORTED;

	/* Make sure command ring is running before aborting it */
	hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if (hw_ring_state == ~(u64)0) {
		xhci_hc_died(xhci);
		goto time_out_completed;
	}

	if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
	    (hw_ring_state & CMD_RING_RUNNING)) {
		/* Prevent new doorbell, and start command abort */
		xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
		xhci_dbg(xhci, "Command timeout\n");
		xhci_abort_cmd_ring(xhci, flags);
		goto time_out_completed;
	}

	/* host removed. Bail out */
	if (xhci->xhc_state & XHCI_STATE_REMOVING) {
		xhci_dbg(xhci, "host removed, ring start fail?\n");
		xhci_cleanup_command_queue(xhci);

		goto time_out_completed;
	}

	/* command timeout on stopped ring, ring can't be aborted */
	xhci_dbg(xhci, "Command timeout on stopped ring\n");
	xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);

time_out_completed:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return;
}

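/*
 * Main command-completion dispatcher: the event's TRB pointer must match our
 * own dequeue position before the command is dispatched by TRB type;
 * afterwards the timeout timer is restarted if more commands are still queued.
 */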
static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;
	u32 cmd_comp_code;
	union xhci_trb *cmd_trb;
	struct xhci_command *cmd;
	u32 cmd_type;

	cmd_dma = le64_to_cpu(event->cmd_trb);
	cmd_trb = xhci->cmd_ring->dequeue;

	trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic);

	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			cmd_trb);
	/*
	 * Check whether the completion event is for our internal kept
	 * command.
	 */
	if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) {
		xhci_warn(xhci,
			  "ERROR mismatched command completion event\n");
		return;
	}

	cmd = list_first_entry(&xhci->cmd_list, struct xhci_command, cmd_list);

	cancel_delayed_work(&xhci->cmd_timer);

	cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));

	/* If CMD ring stopped we own the trbs between enqueue and dequeue */
	if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) {
		complete_all(&xhci->cmd_ring_stop_completion);
		return;
	}

	if (cmd->command_trb != xhci->cmd_ring->dequeue) {
		xhci_err(xhci,
			 "Command completion event does not match command\n");
		return;
	}

	/*
	 * Host aborted the command ring, check if the current command was
	 * supposed to be aborted, otherwise continue normally.
	 * The command ring is stopped now, but the xHC will issue a Command
	 * Ring Stopped event which will cause us to restart it.
	 */
	if (cmd_comp_code == COMP_COMMAND_ABORTED) {
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
		if (cmd->status == COMP_COMMAND_ABORTED) {
			if (xhci->current_cmd == cmd)
				xhci->current_cmd = NULL;
			goto event_handled;
		}
	}

	cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
	switch (cmd_type) {
	case TRB_ENABLE_SLOT:
		xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code);
		break;
	case TRB_DISABLE_SLOT:
		xhci_handle_cmd_disable_slot(xhci, slot_id);
		break;
	case TRB_CONFIG_EP:
		if (!cmd->completion)
			xhci_handle_cmd_config_ep(xhci, slot_id, event,
						  cmd_comp_code);
		break;
	case TRB_EVAL_CONTEXT:
		break;
	case TRB_ADDR_DEV:
		xhci_handle_cmd_addr_dev(xhci, slot_id);
		break;
	case TRB_STOP_RING:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
		break;
	case TRB_SET_DEQ:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_CMD_NOOP:
		/* Is this an aborted command turned to NO-OP? */
		if (cmd->status == COMP_COMMAND_RING_STOPPED)
			cmd_comp_code = COMP_COMMAND_RING_STOPPED;
		break;
	case TRB_RESET_EP:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_RESET_DEV:
		/* SLOT_ID field in reset device cmd completion event TRB is 0.
		 * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
		 */
		slot_id = TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3]));
		xhci_handle_cmd_reset_dev(xhci, slot_id, event);
		break;
	case TRB_NEC_GET_FW:
		xhci_handle_cmd_nec_get_fw(xhci, event);
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci_info(xhci, "INFO unknown command type %d\n", cmd_type);
		break;
	}

	/* restart timer if this wasn't the last command */
	if (!list_is_singular(&xhci->cmd_list)) {
		xhci->current_cmd = list_first_entry(&cmd->cmd_list,
						     struct xhci_command, cmd_list);
		xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
	} else if (xhci->current_cmd == cmd) {
		xhci->current_cmd = NULL;
	}

event_handled:
	xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);

	inc_deq(xhci, xhci->cmd_ring);
}

static void handle_vendor_event(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
		handle_cmd_completion(xhci, &event->event_cmd);
}

/* @port_id: the one-based port ID from the hardware (indexed from array of all
 * port registers -- USB 3.0 and USB 2.0).
 *
 * Returns a zero-based port number, which is suitable for indexing into each of
 * the split roothubs' port arrays and bus state arrays.
 * Add one to it in order to call xhci_find_slot_id_by_port.
 */
static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
		struct xhci_hcd *xhci, u32 port_id)
{
	unsigned int i;
	unsigned int num_similar_speed_ports = 0;

	/* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
	 * and usb2_ports are 0-based indexes.  Count the number of similar
	 * speed ports, up to 1 port before this port.
	 */
	for (i = 0; i < (port_id - 1); i++) {
		u8 port_speed = xhci->port_array[i];

		/*
		 * Skip ports that don't have known speeds, or have duplicate
		 * Extended Capabilities port speed entries.
		 */
		if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
			continue;

		/*
		 * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
		 * 1.1 ports are under the USB 2.0 hub.  If the port speed
		 * matches the device speed, it's a similar speed port.
		 */
		if ((port_speed == 0x03) == (hcd->speed >= HCD_USB3))
			num_similar_speed_ports++;
	}
	return num_similar_speed_ports;
}
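
/*
 * Worked example (hypothetical port layout, not from the source): with
 * port_array = {0x03, 0x03, 0x02, 0x02}, a hardware port_id of 4 seen by
 * the USB 2.0 hcd counts exactly one earlier similar-speed port (index 2),
 * so the faked, zero-based port number returned is 1.
 */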

static void handle_device_notification(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 slot_id;
	struct usb_device *udev;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
	if (!xhci->devs[slot_id]) {
		xhci_warn(xhci, "Device Notification event for "
				"unused slot %u\n", slot_id);
		return;
	}

	xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
			slot_id);
	udev = xhci->devs[slot_id]->udev;
	if (udev && udev->parent)
		usb_wakeup_notification(udev->parent, udev->portnum);
}

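/*
 * Port Status Change Event handler.  The hardware's one-based port ID is
 * first mapped to the right roothub (USB 2.0 vs USB 3.0 hcd) and a
 * zero-based "faked" port index before any suspend/resume handling is done.
 */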
static void handle_port_status(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	struct usb_hcd *hcd;
	u32 port_id;
	u32 portsc, cmd_reg;
	int max_ports;
	int slot_id;
	unsigned int faked_port_index;
	u8 major_revision;
	struct xhci_bus_state *bus_state;
	__le32 __iomem **port_array;
	bool bogus_port_status = false;

	/* Port status change events always have a successful completion code */
	if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
		xhci_warn(xhci,
			  "WARN: xHC returned failed port status event\n");

	port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

	max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	if ((port_id <= 0) || (port_id > max_ports)) {
		xhci_warn(xhci, "Invalid port id %d\n", port_id);
		inc_deq(xhci, xhci->event_ring);
		return;
	}

	/* Figure out which usb_hcd this port is attached to:
	 * is it a USB 3.0 port or a USB 2.0/1.1 port?
	 */
	major_revision = xhci->port_array[port_id - 1];

	/* Find the right roothub. */
	hcd = xhci_to_hcd(xhci);
	if ((major_revision == 0x03) != (hcd->speed >= HCD_USB3))
		hcd = xhci->shared_hcd;

	if (major_revision == 0) {
		xhci_warn(xhci, "Event for port %u not in "
				"Extended Capabilities, ignoring.\n",
				port_id);
		bogus_port_status = true;
		goto cleanup;
	}
	if (major_revision == DUPLICATE_ENTRY) {
		xhci_warn(xhci, "Event for port %u duplicated in "
				"Extended Capabilities, ignoring.\n",
				port_id);
		bogus_port_status = true;
		goto cleanup;
	}

	/*
	 * Hardware port IDs reported by a Port Status Change Event include USB
	 * 3.0 and USB 2.0 ports.  We want to check if the port has reported a
	 * resume event, but we first need to translate the hardware port ID
	 * into the index into the ports on the correct split roothub, and the
	 * correct bus_state structure.
	 */
	bus_state = &xhci->bus_state[hcd_index(hcd)];
	if (hcd->speed >= HCD_USB3)
		port_array = xhci->usb3_ports;
	else
		port_array = xhci->usb2_ports;
	/* Find the faked port hub number */
	faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
			port_id);
	portsc = readl(port_array[faked_port_index]);

	trace_xhci_handle_port_status(faked_port_index, portsc);

	if (hcd->state == HC_STATE_SUSPENDED) {
		xhci_dbg(xhci, "resume root hub\n");
		usb_hcd_resume_root_hub(hcd);
	}

	if (hcd->speed >= HCD_USB3 && (portsc & PORT_PLS_MASK) == XDEV_INACTIVE)
		bus_state->port_remote_wakeup &= ~(1 << faked_port_index);

	if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
		xhci_dbg(xhci, "port resume event for port %d\n", port_id);

		cmd_reg = readl(&xhci->op_regs->command);
		if (!(cmd_reg & CMD_RUN)) {
			xhci_warn(xhci, "xHC is not running.\n");
			goto cleanup;
		}

		if (DEV_SUPERSPEED_ANY(portsc)) {
			xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
			/* Set a flag to say the port signaled remote wakeup,
			 * so we can tell the difference between the end of
			 * device and host initiated resume.
			 */
			bus_state->port_remote_wakeup |= 1 << faked_port_index;
			xhci_test_and_clear_bit(xhci, port_array,
					faked_port_index, PORT_PLC);
			xhci_set_link_state(xhci, port_array, faked_port_index,
						XDEV_U0);
			/* Need to wait until the next link state change
			 * indicates the device is actually in U0.
			 */
			bogus_port_status = true;
			goto cleanup;
		} else if (!test_bit(faked_port_index,
				     &bus_state->resuming_ports)) {
			xhci_dbg(xhci, "resume HS port %d\n", port_id);
			bus_state->resume_done[faked_port_index] = jiffies +
				msecs_to_jiffies(USB_RESUME_TIMEOUT);
			set_bit(faked_port_index, &bus_state->resuming_ports);
			/* Do the rest in GetPortStatus after the resume time
			 * delay.  Avoid polling the roothub status before
			 * then, so that a USB device's auto-resume latency
			 * stays around ~40ms.
			 */
			set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
			mod_timer(&hcd->rh_timer,
				  bus_state->resume_done[faked_port_index]);
			bogus_port_status = true;
		}
	}

	if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_U0 &&
	    DEV_SUPERSPEED_ANY(portsc)) {
		xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
		/* We've just brought the device into U0 through either the
		 * Resume state after a device remote wakeup, or through the
		 * U3Exit state after a host-initiated resume.  If it's a device
		 * initiated remote wake, don't pass up the link state change,
		 * so the roothub behavior is consistent with external
		 * USB 3.0 hub behavior.
		 */
		slot_id = xhci_find_slot_id_by_port(hcd, xhci,
				faked_port_index + 1);
		if (slot_id && xhci->devs[slot_id])
			xhci_ring_device(xhci, slot_id);
		if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
			bus_state->port_remote_wakeup &=
				~(1 << faked_port_index);
			xhci_test_and_clear_bit(xhci, port_array,
					faked_port_index, PORT_PLC);
			usb_wakeup_notification(hcd->self.root_hub,
					faked_port_index + 1);
			bogus_port_status = true;
			goto cleanup;
		}
	}

	/*
	 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
	 * RExit to a disconnect state).  If so, let the driver know it's
	 * out of the RExit state.
	 */
	if (!DEV_SUPERSPEED_ANY(portsc) &&
			test_and_clear_bit(faked_port_index,
					   &bus_state->rexit_ports)) {
		complete(&bus_state->rexit_done[faked_port_index]);
		bogus_port_status = true;
		goto cleanup;
	}

	if (hcd->speed < HCD_USB3)
		xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
					PORT_PLC);

cleanup:
	/* Update event ring dequeue pointer before dropping the lock */
	inc_deq(xhci, xhci->event_ring);

	/* Don't make the USB core poll the roothub if we got a bad port status
	 * change event.  Besides, at that point we can't tell which roothub
	 * (USB 2.0 or USB 3.0) to kick.
	 */
	if (bogus_port_status)
		return;

	/*
	 * xHCI port-status-change events occur when the "or" of all the
	 * status-change bits in the portsc register changes from 0 to 1.
	 * New status changes won't cause an event if any other change
	 * bits are still set.  When an event occurs, switch over to
	 * polling to avoid losing status changes.
	 */
	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
	usb_hcd_poll_rh_status(hcd);
	spin_lock(&xhci->lock);
}

/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment.  If the suspect DMA address is a
 * TRB in this TD, this function returns that TRB's segment.  Otherwise it
 * returns NULL.
 */
struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
		struct xhci_segment *start_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t suspect_dma,
		bool debug)
{
	dma_addr_t start_dma;
	dma_addr_t end_seg_dma;
	dma_addr_t end_trb_dma;
	struct xhci_segment *cur_seg;

	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
	cur_seg = start_seg;

	do {
		if (start_dma == 0)
			return NULL;
		/* We may get an event for a Link TRB in the middle of a TD */
		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
		/* If the end TRB isn't in this segment, this is set to 0 */
		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);

		if (debug)
			xhci_warn(xhci,
				"Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
				(unsigned long long)suspect_dma,
				(unsigned long long)start_dma,
				(unsigned long long)end_trb_dma,
				(unsigned long long)cur_seg->dma,
				(unsigned long long)end_seg_dma);

		if (end_trb_dma > 0) {
			/* The end TRB is in this segment, so suspect should be here */
			if (start_dma <= end_trb_dma) {
				if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
					return cur_seg;
			} else {
				/* Case for one segment with
				 * a TD wrapped around to the top
				 */
				if ((suspect_dma >= start_dma &&
						suspect_dma <= end_seg_dma) ||
						(suspect_dma >= cur_seg->dma &&
						 suspect_dma <= end_trb_dma))
					return cur_seg;
			}
			return NULL;
		} else {
			/* Might still be somewhere in this segment */
			if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
				return cur_seg;
		}
		cur_seg = cur_seg->next;
		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
	} while (cur_seg != start_seg);

	return NULL;
}

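/*
 * Note on the wrap case above: when start_dma > end_trb_dma within one
 * segment, the TD wraps past the segment end, so the suspect address matches
 * either the [start_dma, end of segment] range or the [segment start,
 * end_trb_dma] range.
 */
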
static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *td,
		enum xhci_ep_reset_type reset_type)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	struct xhci_command *command;
	command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
	if (!command)
		return;

	ep->ep_state |= EP_HALTED;

	xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type);

	if (reset_type == EP_HARD_RESET)
		xhci_cleanup_stalled_ring(xhci, ep_index, stream_id, td);

	xhci_ring_cmd_db(xhci);
}

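/*
 * Note: with EP_HARD_RESET the helper above also cleans up the stalled ring,
 * moving the dequeue pointer past the offending TD; a soft reset only queues
 * the Reset Endpoint command itself.
 */
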
/* Check if an error has halted the endpoint ring.  The class driver will
 * cleanup the halt for a non-default control endpoint if we indicate a stall.
 * However, a babble and other errors also halt the endpoint ring, and the class
 * driver won't clear the halt in that case, so we need to issue a Set Transfer
 * Ring Dequeue Pointer command manually.
 */
static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		unsigned int trb_comp_code)
{
	/* TRB completion codes that may require a manual halt cleanup */
	if (trb_comp_code == COMP_USB_TRANSACTION_ERROR ||
			trb_comp_code == COMP_BABBLE_DETECTED_ERROR ||
			trb_comp_code == COMP_SPLIT_TRANSACTION_ERROR)
		/* The 0.95 spec says a babbling control endpoint
		 * is not halted.  The 0.96 spec says it is.  Some HW
		 * claims to be 0.95 compliant, but it halts the control
		 * endpoint anyway.  Check if a babble halted the
		 * endpoint.
		 */
		if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED)
			return 1;

	return 0;
}

int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
{
	if (trb_comp_code >= 224 && trb_comp_code <= 255) {
		/* Vendor defined "informational" completion code,
		 * treat as not-an-error.
		 */
		xhci_dbg(xhci, "Vendor defined info completion code %u\n",
			 trb_comp_code);
		xhci_dbg(xhci, "Treating code as success.\n");
		return 1;
	}
	return 0;
}

static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
			   struct xhci_ring *ep_ring, int *status)
{
	struct urb *urb = NULL;

	/* Clean up the endpoint's TD list */
	urb = td->urb;

	/* if a bounce buffer was used to align this td then unmap it */
	xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);

	/* Do one last check of the actual transfer length.
	 * If the host controller said we transferred more data than the buffer
	 * length, urb->actual_length will be a very big number (since it's
	 * unsigned).  Play it safe and say we didn't transfer anything.
	 */
	if (urb->actual_length > urb->transfer_buffer_length) {
		xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
			  urb->transfer_buffer_length, urb->actual_length);
		urb->actual_length = 0;
		*status = 0;
	}
	list_del_init(&td->td_list);
	/* Was this TD slated to be cancelled but completed anyway? */
	if (!list_empty(&td->cancelled_td_list))
		list_del_init(&td->cancelled_td_list);

	inc_td_cnt(urb);
	/* Giveback the urb when all the tds are completed */
	if (last_td_in_urb(td)) {
		if ((urb->actual_length != urb->transfer_buffer_length &&
		     (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
		    (*status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
			xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
				 urb, urb->actual_length,
				 urb->transfer_buffer_length, *status);

		/* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
			*status = 0;
		xhci_giveback_urb_in_irq(xhci, td, *status);
	}

	return 0;
}

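/*
 * Common tail for the transfer handlers below: leave stopped TDs in place
 * for the Stop Endpoint completion to deal with, clean up halted endpoints
 * with a hard reset, or simply advance the ring's dequeue pointer past the
 * TD, then hand off to xhci_td_cleanup().
 */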
static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
		struct xhci_transfer_event *event,
		struct xhci_virt_ep *ep, int *status)
{
	struct xhci_virt_device *xdev;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	u32 trb_comp_code;
	int ep_index;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
			trb_comp_code == COMP_STOPPED ||
			trb_comp_code == COMP_STOPPED_SHORT_PACKET) {
		/* The Endpoint Stop Command completion will take care of any
		 * stopped TDs.  A stopped TD may be restarted, so don't update
		 * the ring dequeue pointer or take this TD off any lists yet.
		 */
		return 0;
	}
	if (trb_comp_code == COMP_STALL_ERROR ||
		xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
						trb_comp_code)) {
		/* Issue a reset endpoint command to clear the host side
		 * halt, followed by a set dequeue command to move the
		 * dequeue pointer past the TD.
		 * The class driver clears the device side halt later.
		 */
		xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
					ep_ring->stream_id, td, EP_HARD_RESET);
	} else {
		/* Update ring dequeue pointer */
		while (ep_ring->dequeue != td->last_trb)
			inc_deq(xhci, ep_ring);
		inc_deq(xhci, ep_ring);
	}

	return xhci_td_cleanup(xhci, td, ep_ring, status);
}

/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
			   union xhci_trb *stop_trb)
{
	u32 sum;
	union xhci_trb *trb = ring->dequeue;
	struct xhci_segment *seg = ring->deq_seg;

	for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) {
		if (!trb_is_noop(trb) && !trb_is_link(trb))
			sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
	}
	return sum;
}

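/*
 * The handlers below use sum_trb_lengths() to work out how much data a TD
 * actually moved when it stops or errors out mid-TD, e.g.
 * actual_length = sum_trb_lengths(...) + ep_trb_len - remaining,
 * i.e. all fully completed TRBs plus the completed part of the current one.
 */
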
/*
 * Process control tds, update urb status and actual_length.
 */
static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_virt_device *xdev;
	unsigned int slot_id;
	int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 trb_comp_code;
	u32 remaining, requested;
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3]));
	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	requested = td->urb->transfer_buffer_length;
	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));

	switch (trb_comp_code) {
	case COMP_SUCCESS:
		if (trb_type != TRB_STATUS) {
			xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n",
				  (trb_type == TRB_DATA) ? "data" : "setup");
			*status = -ESHUTDOWN;
			break;
		}
		*status = 0;
		break;
	case COMP_SHORT_PACKET:
		*status = 0;
		break;
	case COMP_STOPPED_SHORT_PACKET:
		if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
			td->urb->actual_length = remaining;
		else
			xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
		goto finish_td;
	case COMP_STOPPED:
		switch (trb_type) {
		case TRB_SETUP:
			td->urb->actual_length = 0;
			goto finish_td;
		case TRB_DATA:
		case TRB_NORMAL:
			td->urb->actual_length = requested - remaining;
			goto finish_td;
		case TRB_STATUS:
			td->urb->actual_length = requested;
			goto finish_td;
		default:
			xhci_warn(xhci, "WARN: unexpected TRB Type %d\n",
				  trb_type);
			goto finish_td;
		}
	case COMP_STOPPED_LENGTH_INVALID:
		goto finish_td;
	default:
		if (!xhci_requires_manual_halt_cleanup(xhci,
						       ep_ctx, trb_comp_code))
			break;
		xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
			 trb_comp_code, ep_index);
		/* else fall through */
	case COMP_STALL_ERROR:
		/* Did we transfer part of the data (middle) phase? */
		if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
			td->urb->actual_length = requested - remaining;
		else if (!td->urb_length_set)
			td->urb->actual_length = 0;
		goto finish_td;
	}

	/* stopped at setup stage, no data transferred */
	if (trb_type == TRB_SETUP)
		goto finish_td;

	/*
	 * if on data stage then update the actual_length of the URB and flag it
	 * as set, so it won't be overwritten in the event for the last TRB.
	 */
	if (trb_type == TRB_DATA ||
	    trb_type == TRB_NORMAL) {
		td->urb_length_set = true;
		td->urb->actual_length = requested - remaining;
		xhci_dbg(xhci, "Waiting for status stage event\n");
		return 0;
	}

	/* at status stage */
	if (!td->urb_length_set)
		td->urb->actual_length = requested;

finish_td:
	return finish_td(xhci, td, event, ep, status);
}

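/*
 * In process_ctrl_td() above, trb_type identifies which control-transfer
 * stage the event landed on: TRB_SETUP (setup stage), TRB_DATA/TRB_NORMAL
 * (data stage), or TRB_STATUS (status stage); urb->actual_length is then
 * derived differently for each stage.
 */
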
/*
 * Process isochronous tds, update urb packet status and actual_length.
 */
static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	int idx;
	struct usb_iso_packet_descriptor *frame;
	u32 trb_comp_code;
	bool sum_trbs_for_length = false;
	u32 remaining, requested, ep_trb_len;
	int short_framestatus;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	urb_priv = td->urb->hcpriv;
	idx = urb_priv->num_tds_done;
	frame = &td->urb->iso_frame_desc[idx];
	requested = frame->length;
	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
	ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
	short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
		-EREMOTEIO : 0;

	/* handle completion code */
	switch (trb_comp_code) {
	case COMP_SUCCESS:
		if (remaining) {
			frame->status = short_framestatus;
			if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
				sum_trbs_for_length = true;
			break;
		}
		frame->status = 0;
		break;
	case COMP_SHORT_PACKET:
		frame->status = short_framestatus;
		sum_trbs_for_length = true;
		break;
	case COMP_BANDWIDTH_OVERRUN_ERROR:
		frame->status = -ECOMM;
		break;
	case COMP_ISOCH_BUFFER_OVERRUN:
	case COMP_BABBLE_DETECTED_ERROR:
		frame->status = -EOVERFLOW;
		break;
	case COMP_INCOMPATIBLE_DEVICE_ERROR:
	case COMP_STALL_ERROR:
		frame->status = -EPROTO;
		break;
	case COMP_USB_TRANSACTION_ERROR:
		frame->status = -EPROTO;
		if (ep_trb != td->last_trb)
			return 0;
		break;
	case COMP_STOPPED:
		sum_trbs_for_length = true;
		break;
	case COMP_STOPPED_SHORT_PACKET:
		/* field normally containing residue now contains transferred */
		frame->status = short_framestatus;
		requested = remaining;
		break;
	case COMP_STOPPED_LENGTH_INVALID:
		requested = 0;
		remaining = 0;
		break;
	default:
		sum_trbs_for_length = true;
		frame->status = -1;
		break;
	}

	if (sum_trbs_for_length)
		frame->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb) +
			ep_trb_len - remaining;
	else
		frame->actual_length = requested;

	td->urb->actual_length += frame->actual_length;

	return finish_td(xhci, td, event, ep, status);
}

static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
			struct xhci_transfer_event *event,
			struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct usb_iso_packet_descriptor *frame;
	int idx;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	urb_priv = td->urb->hcpriv;
	idx = urb_priv->num_tds_done;
	frame = &td->urb->iso_frame_desc[idx];

	/* The transfer is partly done. */
	frame->status = -EXDEV;

	/* calc actual length */
	frame->actual_length = 0;

	/* Update ring dequeue pointer */
	while (ep_ring->dequeue != td->last_trb)
		inc_deq(xhci, ep_ring);
	inc_deq(xhci, ep_ring);

	return xhci_td_cleanup(xhci, td, ep_ring, status);
}

Andiry Xu04e51902010-07-22 15:23:39 -07002202/*
Andiry Xu22405ed2010-07-22 15:23:08 -07002203 * Process bulk and interrupt tds, update urb status and actual_length.
2204 */
2205static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
Mathias Nymanf97c08a2016-11-11 15:13:18 +02002206 union xhci_trb *ep_trb, struct xhci_transfer_event *event,
Andiry Xu22405ed2010-07-22 15:23:08 -07002207 struct xhci_virt_ep *ep, int *status)
2208{
2209 struct xhci_ring *ep_ring;
Andiry Xu22405ed2010-07-22 15:23:08 -07002210 u32 trb_comp_code;
Mathias Nymanf97c08a2016-11-11 15:13:18 +02002211 u32 remaining, requested, ep_trb_len;
Andiry Xu22405ed2010-07-22 15:23:08 -07002212
Matt Evans28ccd292011-03-29 13:40:46 +11002213 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2214 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
Mathias Nyman30a65b42016-11-11 15:13:17 +02002215 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
Mathias Nymanf97c08a2016-11-11 15:13:18 +02002216 ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
Mathias Nyman30a65b42016-11-11 15:13:17 +02002217 requested = td->urb->transfer_buffer_length;
Andiry Xu22405ed2010-07-22 15:23:08 -07002218
2219 switch (trb_comp_code) {
2220 case COMP_SUCCESS:
Mathias Nyman30a65b42016-11-11 15:13:17 +02002221 /* handle success with untransferred data as short packet */
Mathias Nymanf97c08a2016-11-11 15:13:18 +02002222 if (ep_trb != td->last_trb || remaining) {
Mathias Nyman52ab8682016-11-11 15:13:15 +02002223 xhci_warn(xhci, "WARN Successful completion on short TX\n");
Mathias Nyman30a65b42016-11-11 15:13:17 +02002224 xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
2225 td->urb->ep->desc.bEndpointAddress,
2226 requested, remaining);
Andiry Xu22405ed2010-07-22 15:23:08 -07002227 }
Mathias Nyman52ab8682016-11-11 15:13:15 +02002228 *status = 0;
Andiry Xu22405ed2010-07-22 15:23:08 -07002229 break;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002230 case COMP_SHORT_PACKET:
Mathias Nyman30a65b42016-11-11 15:13:17 +02002231 xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
2232 td->urb->ep->desc.bEndpointAddress,
2233 requested, remaining);
2234 *status = 0;
2235 break;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002236 case COMP_STOPPED_SHORT_PACKET:
Mathias Nyman30a65b42016-11-11 15:13:17 +02002237 td->urb->actual_length = remaining;
2238 goto finish_td;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002239 case COMP_STOPPED_LENGTH_INVALID:
Mathias Nyman30a65b42016-11-11 15:13:17 +02002240 /* stopped on ep trb with invalid length, exclude it */
Mathias Nymanf97c08a2016-11-11 15:13:18 +02002241 ep_trb_len = 0;
Mathias Nyman30a65b42016-11-11 15:13:17 +02002242 remaining = 0;
Andiry Xu22405ed2010-07-22 15:23:08 -07002243 break;
2244 default:
Mathias Nyman30a65b42016-11-11 15:13:17 +02002245 /* do nothing */
Andiry Xu22405ed2010-07-22 15:23:08 -07002246 break;
2247 }
Mathias Nyman30a65b42016-11-11 15:13:17 +02002248
Mathias Nymanf97c08a2016-11-11 15:13:18 +02002249 if (ep_trb == td->last_trb)
Mathias Nyman30a65b42016-11-11 15:13:17 +02002250 td->urb->actual_length = requested - remaining;
2251 else
Lu Baolu40a3b772015-08-06 19:24:01 +03002252 td->urb->actual_length =
Mathias Nymanf97c08a2016-11-11 15:13:18 +02002253 sum_trb_lengths(xhci, ep_ring, ep_trb) +
2254 ep_trb_len - remaining;
Mathias Nyman30a65b42016-11-11 15:13:17 +02002255finish_td:
2256 if (remaining > requested) {
2257 xhci_warn(xhci, "bad transfer trb length %d in event trb\n",
2258 remaining);
Andiry Xu22405ed2010-07-22 15:23:08 -07002259 td->urb->actual_length = 0;
Andiry Xu22405ed2010-07-22 15:23:08 -07002260 }
Lu Baolu0c341912018-03-16 16:33:00 +02002261 return finish_td(xhci, td, event, ep, status);
Andiry Xu22405ed2010-07-22 15:23:08 -07002262}
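/*
 * Worked example of the sum_trb_lengths() path above (hypothetical
 * values, for illustration only): a TD of three 1024-byte TRBs that
 * stops on its second TRB with remaining = 100 gives actual_length =
 * 1024 (TRBs before the event TRB) + 1024 (ep_trb_len) - 100 = 1948.
 */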
2263
2264/*
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002265 * If this function returns an error condition, it means it got a Transfer
2266 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
2267 * At this point, the host controller is probably hosed and should be reset.
2268 */
2269static int handle_tx_event(struct xhci_hcd *xhci,
2270 struct xhci_transfer_event *event)
2271{
2272 struct xhci_virt_device *xdev;
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07002273 struct xhci_virt_ep *ep;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002274 struct xhci_ring *ep_ring;
Sarah Sharp82d10092009-08-07 14:04:52 -07002275 unsigned int slot_id;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002276 int ep_index;
Randy Dunlap326b4812010-04-19 08:53:50 -07002277 struct xhci_td *td = NULL;
Mathias Nymanf97c08a2016-11-11 15:13:18 +02002278 dma_addr_t ep_trb_dma;
2279 struct xhci_segment *ep_seg;
2280 union xhci_trb *ep_trb;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002281 int status = -EINPROGRESS;
John Yound115b042009-07-27 12:05:15 -07002282 struct xhci_ep_ctx *ep_ctx;
Andiry Xuc2d7b492011-09-19 16:05:12 -07002283 struct list_head *tmp;
Sarah Sharp66d1eeb2009-08-27 14:35:53 -07002284 u32 trb_comp_code;
Andiry Xuc2d7b492011-09-19 16:05:12 -07002285 int td_num = 0;
Mathias Nyman3b4739b82015-10-12 11:30:12 +03002286 bool handling_skipped_tds = false;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002287
Matt Evans28ccd292011-03-29 13:40:46 +11002288 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
Mathias Nymanb3368382017-06-15 11:55:43 +03002289 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
2290 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2291 ep_trb_dma = le64_to_cpu(event->buffer);
2292
Sarah Sharp82d10092009-08-07 14:04:52 -07002293 xdev = xhci->devs[slot_id];
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002294 if (!xdev) {
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002295 xhci_err(xhci, "ERROR Transfer event pointed to bad slot %u\n",
2296 slot_id);
Mathias Nymanb3368382017-06-15 11:55:43 +03002297 goto err_out;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002298 }
2299
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07002300 ep = &xdev->eps[ep_index];
Mathias Nymanb3368382017-06-15 11:55:43 +03002301 ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma);
John Yound115b042009-07-27 12:05:15 -07002302 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
Mathias Nymanb3368382017-06-15 11:55:43 +03002303
Mathias Nymanade2e3a2017-06-15 11:55:46 +03002304 if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) {
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002305 xhci_err(xhci,
Mathias Nymanade2e3a2017-06-15 11:55:46 +03002306 "ERROR Transfer event for disabled endpoint slot %u ep %u\n",
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002307 slot_id, ep_index);
Mathias Nymanb3368382017-06-15 11:55:43 +03002308 goto err_out;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002309 }
2310
Mathias Nymanade2e3a2017-06-15 11:55:46 +03002311 /* Some transfer events don't always point to a trb, see xhci 4.17.4 */
2312 if (!ep_ring) {
2313 switch (trb_comp_code) {
2314 case COMP_STALL_ERROR:
2315 case COMP_USB_TRANSACTION_ERROR:
2316 case COMP_INVALID_STREAM_TYPE_ERROR:
2317 case COMP_INVALID_STREAM_ID_ERROR:
2318 xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, 0,
Lu Baolu5fee5a52018-03-16 16:32:59 +02002319 NULL, EP_SOFT_RESET);
Mathias Nymanade2e3a2017-06-15 11:55:46 +03002320 goto cleanup;
2321 case COMP_RING_UNDERRUN:
2322 case COMP_RING_OVERRUN:
2323 goto cleanup;
2324 default:
2325 xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n",
2326 slot_id, ep_index);
2327 goto err_out;
2328 }
2329 }
2330
Andiry Xuc2d7b492011-09-19 16:05:12 -07002331	/* Count the TDs queued on the ring if ep->skip is set */
2332 if (ep->skip) {
2333 list_for_each(tmp, &ep_ring->td_list)
2334 td_num++;
2335 }
2336
Andiry Xu986a92d2010-07-22 15:23:20 -07002337 /* Look for common error cases */
Sarah Sharp66d1eeb2009-08-27 14:35:53 -07002338 switch (trb_comp_code) {
Sarah Sharpb10de142009-04-27 19:58:50 -07002339 /* Skip codes that require special handling depending on
2340 * transfer type
2341 */
2342 case COMP_SUCCESS:
Vivek Gautam1c11a172013-03-21 12:06:48 +05302343 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
Sarah Sharp1530bbc62012-05-08 09:22:49 -07002344 break;
2345 if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002346 trb_comp_code = COMP_SHORT_PACKET;
Sarah Sharp1530bbc62012-05-08 09:22:49 -07002347 else
Sarah Sharp8202ce22012-07-25 10:52:45 -07002348 xhci_warn_ratelimited(xhci,
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002349 "WARN Successful completion on short TX for slot %u ep %u: needs XHCI_TRUST_TX_LENGTH quirk?\n",
2350 slot_id, ep_index);
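		/* fall through */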
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002351 case COMP_SHORT_PACKET:
Sarah Sharpb10de142009-04-27 19:58:50 -07002352 break;
Mathias Nymanb3368382017-06-15 11:55:43 +03002353 /* Completion codes for endpoint stopped state */
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002354 case COMP_STOPPED:
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002355 xhci_dbg(xhci, "Stopped on Transfer TRB for slot %u ep %u\n",
2356 slot_id, ep_index);
Sarah Sharpae636742009-04-29 19:02:31 -07002357 break;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002358 case COMP_STOPPED_LENGTH_INVALID:
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002359 xhci_dbg(xhci,
2360 "Stopped on No-op or Link TRB for slot %u ep %u\n",
2361 slot_id, ep_index);
Sarah Sharpae636742009-04-29 19:02:31 -07002362 break;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002363 case COMP_STOPPED_SHORT_PACKET:
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002364 xhci_dbg(xhci,
2365 "Stopped with short packet transfer detected for slot %u ep %u\n",
2366 slot_id, ep_index);
Lu Baolu40a3b772015-08-06 19:24:01 +03002367 break;
Mathias Nymanb3368382017-06-15 11:55:43 +03002368 /* Completion codes for endpoint halted state */
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002369 case COMP_STALL_ERROR:
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002370 xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n", slot_id,
2371 ep_index);
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07002372 ep->ep_state |= EP_HALTED;
Sarah Sharpb10de142009-04-27 19:58:50 -07002373 status = -EPIPE;
2374 break;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002375 case COMP_SPLIT_TRANSACTION_ERROR:
2376 case COMP_USB_TRANSACTION_ERROR:
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002377		xhci_dbg(xhci, "Transfer error for slot %u ep %u\n",
2378 slot_id, ep_index);
Sarah Sharpb10de142009-04-27 19:58:50 -07002379 status = -EPROTO;
2380 break;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002381 case COMP_BABBLE_DETECTED_ERROR:
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002382		xhci_dbg(xhci, "Babble error for slot %u ep %u\n",
2383 slot_id, ep_index);
Sarah Sharp4a731432009-07-27 12:04:32 -07002384 status = -EOVERFLOW;
2385 break;
Mathias Nymanb3368382017-06-15 11:55:43 +03002386 /* Completion codes for endpoint error state */
2387 case COMP_TRB_ERROR:
2388 xhci_warn(xhci,
2389 "WARN: TRB error for slot %u ep %u on endpoint\n",
2390 slot_id, ep_index);
2391 status = -EILSEQ;
2392 break;
2393 /* completion codes not indicating endpoint state change */
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002394 case COMP_DATA_BUFFER_ERROR:
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002395 xhci_warn(xhci,
2396 "WARN: HC couldn't access mem fast enough for slot %u ep %u\n",
2397 slot_id, ep_index);
Sarah Sharpb10de142009-04-27 19:58:50 -07002398 status = -ENOSR;
2399 break;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002400 case COMP_BANDWIDTH_OVERRUN_ERROR:
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002401 xhci_warn(xhci,
2402 "WARN: bandwidth overrun event for slot %u ep %u on endpoint\n",
2403 slot_id, ep_index);
Andiry Xu986a92d2010-07-22 15:23:20 -07002404 break;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002405 case COMP_ISOCH_BUFFER_OVERRUN:
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002406 xhci_warn(xhci,
2407 "WARN: buffer overrun event for slot %u ep %u on endpoint",
2408 slot_id, ep_index);
Andiry Xu986a92d2010-07-22 15:23:20 -07002409 break;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002410 case COMP_RING_UNDERRUN:
Andiry Xu986a92d2010-07-22 15:23:20 -07002411 /*
2412 * When the Isoch ring is empty, the xHC will generate
2413		 * a Ring Overrun Event for an IN Isoch endpoint or a Ring
2414		 * Underrun Event for an OUT Isoch endpoint.
2415 */
2416 xhci_dbg(xhci, "underrun event on endpoint\n");
2417 if (!list_empty(&ep_ring->td_list))
2418 xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
2419 "still with TDs queued?\n",
Matt Evans28ccd292011-03-29 13:40:46 +11002420 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2421 ep_index);
Andiry Xu986a92d2010-07-22 15:23:20 -07002422 goto cleanup;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002423 case COMP_RING_OVERRUN:
Andiry Xu986a92d2010-07-22 15:23:20 -07002424 xhci_dbg(xhci, "overrun event on endpoint\n");
2425 if (!list_empty(&ep_ring->td_list))
2426 xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
2427 "still with TDs queued?\n",
Matt Evans28ccd292011-03-29 13:40:46 +11002428 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2429 ep_index);
Andiry Xu986a92d2010-07-22 15:23:20 -07002430 goto cleanup;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002431 case COMP_MISSED_SERVICE_ERROR:
Andiry Xud18240d2010-07-22 15:23:25 -07002432 /*
2433		 * When we encounter a missed service error, one or more isoc
2434		 * TDs may have been missed by the xHC.
2435		 * Set the skip flag of the ep_ring; complete the missed TDs as
2436		 * short transfers the next time the ep_ring is processed.
2437 */
2438 ep->skip = true;
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002439 xhci_dbg(xhci,
2440 "Miss service interval error for slot %u ep %u, set skip flag\n",
2441 slot_id, ep_index);
Andiry Xud18240d2010-07-22 15:23:25 -07002442 goto cleanup;
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002443 case COMP_NO_PING_RESPONSE_ERROR:
Mathias Nyman3b4739b82015-10-12 11:30:12 +03002444 ep->skip = true;
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002445 xhci_dbg(xhci,
2446 "No Ping response error for slot %u ep %u, Skip one Isoc TD\n",
2447 slot_id, ep_index);
Mathias Nyman3b4739b82015-10-12 11:30:12 +03002448 goto cleanup;
Mathias Nymanb3368382017-06-15 11:55:43 +03002449
2450 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2451 /* needs disable slot command to recover */
2452 xhci_warn(xhci,
2453 "WARN: detect an incompatible device for slot %u ep %u",
2454 slot_id, ep_index);
2455 status = -EPROTO;
2456 break;
Sarah Sharpb10de142009-04-27 19:58:50 -07002457 default:
Sarah Sharpb45b5062009-12-09 15:59:06 -08002458 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
Sarah Sharp5ad6a522009-11-11 10:28:40 -08002459 status = 0;
2460 break;
2461 }
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002462 xhci_warn(xhci,
2463 "ERROR Unknown event condition %u for slot %u ep %u , HC probably busted\n",
2464 trb_comp_code, slot_id, ep_index);
Sarah Sharpb10de142009-04-27 19:58:50 -07002465 goto cleanup;
2466 }
Andiry Xu986a92d2010-07-22 15:23:20 -07002467
Andiry Xud18240d2010-07-22 15:23:25 -07002468 do {
2469 /* This TRB should be in the TD at the head of this ring's
2470 * TD list.
2471 */
2472 if (list_empty(&ep_ring->td_list)) {
Sarah Sharpa83d6752013-03-18 10:19:51 -07002473 /*
Mathias Nymane4ec40e2017-12-01 13:41:19 +02002474			 * Don't print warnings if it's due to a stopped endpoint
2475			 * generating an extra completion event if the device
2476			 * was suspended. Or, an event for the last TRB of a
2477			 * short TD we already got a short event for.
2478 * The short TD is already removed from the TD list.
Sarah Sharpa83d6752013-03-18 10:19:51 -07002479 */
Mathias Nymane4ec40e2017-12-01 13:41:19 +02002480
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002481 if (!(trb_comp_code == COMP_STOPPED ||
Mathias Nymane4ec40e2017-12-01 13:41:19 +02002482 trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
2483 ep_ring->last_td_was_short)) {
Sarah Sharpa83d6752013-03-18 10:19:51 -07002484 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
2485 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2486 ep_index);
Sarah Sharpa83d6752013-03-18 10:19:51 -07002487 }
Andiry Xud18240d2010-07-22 15:23:25 -07002488 if (ep->skip) {
2489 ep->skip = false;
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002490 xhci_dbg(xhci, "td_list is empty while skip flag set. Clear skip flag for slot %u ep %u.\n",
2491 slot_id, ep_index);
Andiry Xud18240d2010-07-22 15:23:25 -07002492 }
Andiry Xud18240d2010-07-22 15:23:25 -07002493 goto cleanup;
2494 }
Andiry Xu986a92d2010-07-22 15:23:20 -07002495
Andiry Xuc2d7b492011-09-19 16:05:12 -07002496 /* We've skipped all the TDs on the ep ring when ep->skip set */
2497 if (ep->skip && td_num == 0) {
2498 ep->skip = false;
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002499 xhci_dbg(xhci, "All tds on the ep_ring skipped. Clear skip flag for slot %u ep %u.\n",
2500 slot_id, ep_index);
Andiry Xuc2d7b492011-09-19 16:05:12 -07002501 goto cleanup;
2502 }
2503
Felipe Balbi04861f82017-01-23 14:20:09 +02002504 td = list_first_entry(&ep_ring->td_list, struct xhci_td,
2505 td_list);
Andiry Xuc2d7b492011-09-19 16:05:12 -07002506 if (ep->skip)
2507 td_num--;
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002508
Andiry Xud18240d2010-07-22 15:23:25 -07002509 /* Is this a TRB in the currently executing TD? */
Mathias Nymanf97c08a2016-11-11 15:13:18 +02002510 ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
2511 td->last_trb, ep_trb_dma, false);
Alex Hee1cf4862011-06-03 15:58:25 +08002512
2513 /*
2514 * Skip the Force Stopped Event. The event_trb(event_dma) of FSE
2515		 * is not in the current TD pointed to by ep_ring->dequeue because
2516		 * the hardware dequeue pointer is still at the previous TRB
2517		 * of the current TD. The previous TRB may be a Link TRB or the
2518		 * last TRB of the previous TD. The command completion handler
2519		 * will take care of the rest.
2520 */
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002521 if (!ep_seg && (trb_comp_code == COMP_STOPPED ||
2522 trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
Alex Hee1cf4862011-06-03 15:58:25 +08002523 goto cleanup;
2524 }
2525
Mathias Nymanf97c08a2016-11-11 15:13:18 +02002526 if (!ep_seg) {
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002527 if (!ep->skip ||
2528 !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
Sarah Sharpad808332011-05-25 10:43:56 -07002529 /* Some host controllers give a spurious
2530 * successful event after a short transfer.
2531 * Ignore it.
2532 */
Mathias Nymanddba5cd2014-05-08 19:26:00 +03002533 if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
Sarah Sharpad808332011-05-25 10:43:56 -07002534 ep_ring->last_td_was_short) {
2535 ep_ring->last_td_was_short = false;
Sarah Sharpad808332011-05-25 10:43:56 -07002536 goto cleanup;
2537 }
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002538 /* HC is busted, give up! */
2539 xhci_err(xhci,
2540 "ERROR Transfer event TRB DMA ptr not "
Hans de Goedecffb9be2014-08-20 16:41:51 +03002541 "part of current TD ep_index %d "
2542 "comp_code %u\n", ep_index,
2543 trb_comp_code);
2544 trb_in_td(xhci, ep_ring->deq_seg,
2545 ep_ring->dequeue, td->last_trb,
Mathias Nymanf97c08a2016-11-11 15:13:18 +02002546 ep_trb_dma, true);
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002547 return -ESHUTDOWN;
2548 }
2549
Mathias Nyman0c03d892016-11-11 15:13:23 +02002550 skip_isoc_td(xhci, td, event, ep, &status);
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002551 goto cleanup;
2552 }
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002553 if (trb_comp_code == COMP_SHORT_PACKET)
Sarah Sharpad808332011-05-25 10:43:56 -07002554 ep_ring->last_td_was_short = true;
2555 else
2556 ep_ring->last_td_was_short = false;
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002557
2558 if (ep->skip) {
Zhengjun Xingb7f769a2017-04-07 17:56:59 +03002559 xhci_dbg(xhci,
2560 "Found td. Clear skip flag for slot %u ep %u.\n",
2561 slot_id, ep_index);
Andiry Xud18240d2010-07-22 15:23:25 -07002562 ep->skip = false;
2563 }
Andiry Xu986a92d2010-07-22 15:23:20 -07002564
Mathias Nymanf97c08a2016-11-11 15:13:18 +02002565 ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) /
2566 sizeof(*ep_trb)];
Felipe Balbia37c3f72017-01-23 14:20:19 +02002567
2568 trace_xhci_handle_transfer(ep_ring,
2569 (struct xhci_generic_trb *) ep_trb);
2570
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002571 /*
Lu Baolu810a6242017-10-06 17:45:29 +03002572		 * A No-op TRB could trigger an interrupt in a case where
2573		 * a URB was killed and a STALL_ERROR happened right
2574 * after the endpoint ring stopped. Reset the halted
2575 * endpoint. Otherwise, the endpoint remains stalled
2576 * indefinitely.
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002577 */
Mathias Nymanf97c08a2016-11-11 15:13:18 +02002578 if (trb_is_noop(ep_trb)) {
Lu Baolu810a6242017-10-06 17:45:29 +03002579 if (trb_comp_code == COMP_STALL_ERROR ||
2580 xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
2581 trb_comp_code))
2582 xhci_cleanup_halted_endpoint(xhci, slot_id,
2583 ep_index,
2584 ep_ring->stream_id,
Lu Baolu5fee5a52018-03-16 16:32:59 +02002585 td, EP_HARD_RESET);
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002586 goto cleanup;
Andiry Xud18240d2010-07-22 15:23:25 -07002587 }
2588
Mathias Nyman0c03d892016-11-11 15:13:23 +02002589 /* update the urb's actual_length and give back to the core */
Andiry Xud18240d2010-07-22 15:23:25 -07002590 if (usb_endpoint_xfer_control(&td->urb->ep->desc))
Mathias Nyman0c03d892016-11-11 15:13:23 +02002591 process_ctrl_td(xhci, td, ep_trb, event, ep, &status);
Andiry Xu04e51902010-07-22 15:23:39 -07002592 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
Mathias Nyman0c03d892016-11-11 15:13:23 +02002593 process_isoc_td(xhci, td, ep_trb, event, ep, &status);
Andiry Xud18240d2010-07-22 15:23:25 -07002594 else
Mathias Nyman0c03d892016-11-11 15:13:23 +02002595 process_bulk_intr_td(xhci, td, ep_trb, event, ep,
2596 &status);
Andiry Xu4422da62010-07-22 15:22:55 -07002597cleanup:
Mathias Nyman3b4739b82015-10-12 11:30:12 +03002598 handling_skipped_tds = ep->skip &&
Felipe Balbi0b7c1052017-01-23 14:20:06 +02002599 trb_comp_code != COMP_MISSED_SERVICE_ERROR &&
2600 trb_comp_code != COMP_NO_PING_RESPONSE_ERROR;
Mathias Nyman3b4739b82015-10-12 11:30:12 +03002601
Andiry Xud18240d2010-07-22 15:23:25 -07002602 /*
Mathias Nyman3b4739b82015-10-12 11:30:12 +03002603 * Do not update event ring dequeue pointer if we're in a loop
2604 * processing missed tds.
Sarah Sharp82d10092009-08-07 14:04:52 -07002605 */
Mathias Nyman3b4739b82015-10-12 11:30:12 +03002606 if (!handling_skipped_tds)
Andiry Xu3b72fca2012-03-05 17:49:32 +08002607 inc_deq(xhci, xhci->event_ring);
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002608
Andiry Xud18240d2010-07-22 15:23:25 -07002609 /*
2610		 * If ep->skip is set, it means there are missed TDs on the
2611		 * endpoint ring that need to be taken care of.
2612		 * Process them as short transfers until we reach the TD pointed
2613		 * to by the event.
2614 */
Mathias Nyman3b4739b82015-10-12 11:30:12 +03002615 } while (handling_skipped_tds);
Andiry Xud18240d2010-07-22 15:23:25 -07002616
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002617 return 0;
Mathias Nymanb3368382017-06-15 11:55:43 +03002618
2619err_out:
2620 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
2621 (unsigned long long) xhci_trb_virt_to_dma(
2622 xhci->event_ring->deq_seg,
2623 xhci->event_ring->dequeue),
2624 lower_32_bits(le64_to_cpu(event->buffer)),
2625 upper_32_bits(le64_to_cpu(event->buffer)),
2626 le32_to_cpu(event->transfer_len),
2627 le32_to_cpu(event->flags));
2628 return -ENODEV;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002629}
2630
2631/*
Sarah Sharp0f2a7932009-04-27 19:57:12 -07002632 * This function handles all OS-owned events on the event ring. It may drop
2633 * xhci->lock between event processing (e.g. to pass up port status changes).
Matt Evans9dee9a22011-03-29 13:41:02 +11002634 * Returns >0 for "possibly more events to process" (caller should call again),
2635 * otherwise 0 if done. In the future, <0 returns should indicate an error code.
Sarah Sharp0f2a7932009-04-27 19:57:12 -07002636 */
Matt Evans9dee9a22011-03-29 13:41:02 +11002637static int xhci_handle_event(struct xhci_hcd *xhci)
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002638{
2639 union xhci_trb *event;
Sarah Sharp0f2a7932009-04-27 19:57:12 -07002640 int update_ptrs = 1;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002641 int ret;
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002642
Lu Baoluf4c8f032016-11-11 15:13:25 +02002643 /* Event ring hasn't been allocated yet. */
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002644 if (!xhci->event_ring || !xhci->event_ring->dequeue) {
Lu Baoluf4c8f032016-11-11 15:13:25 +02002645 xhci_err(xhci, "ERROR event ring not ready\n");
2646 return -ENOMEM;
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002647 }
2648
2649 event = xhci->event_ring->dequeue;
2650 /* Does the HC or OS own the TRB? */
Matt Evans28ccd292011-03-29 13:40:46 +11002651 if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
Lu Baoluf4c8f032016-11-11 15:13:25 +02002652 xhci->event_ring->cycle_state)
Matt Evans9dee9a22011-03-29 13:41:02 +11002653 return 0;
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002654
Felipe Balbia37c3f72017-01-23 14:20:19 +02002655 trace_xhci_handle_event(xhci->event_ring, &event->generic);
2656
Matt Evans92a3da42011-03-29 13:40:51 +11002657 /*
2658 * Barrier between reading the TRB_CYCLE (valid) flag above and any
2659 * speculative reads of the event's flags/data below.
2660 */
2661 rmb();
Sarah Sharp0f2a7932009-04-27 19:57:12 -07002662 /* FIXME: Handle more event types. */
Lu Baoluf4c8f032016-11-11 15:13:25 +02002663 switch (le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) {
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002664 case TRB_TYPE(TRB_COMPLETION):
2665 handle_cmd_completion(xhci, &event->event_cmd);
2666 break;
Sarah Sharp0f2a7932009-04-27 19:57:12 -07002667 case TRB_TYPE(TRB_PORT_STATUS):
2668 handle_port_status(xhci, event);
2669 update_ptrs = 0;
2670 break;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002671 case TRB_TYPE(TRB_TRANSFER):
2672 ret = handle_tx_event(xhci, &event->trans_event);
Lu Baoluf4c8f032016-11-11 15:13:25 +02002673 if (ret >= 0)
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002674 update_ptrs = 0;
2675 break;
Sarah Sharp623bef92011-11-11 14:57:33 -08002676 case TRB_TYPE(TRB_DEV_NOTE):
2677 handle_device_notification(xhci, event);
2678 break;
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002679 default:
Matt Evans28ccd292011-03-29 13:40:46 +11002680 if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
2681 TRB_TYPE(48))
Sarah Sharp02386342010-05-24 13:25:28 -07002682 handle_vendor_event(xhci, event);
2683 else
Lu Baoluf4c8f032016-11-11 15:13:25 +02002684 xhci_warn(xhci, "ERROR unknown event type %d\n",
2685 TRB_FIELD_TO_TYPE(
2686 le32_to_cpu(event->event_cmd.flags)));
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002687 }
Sarah Sharp6f5165c2009-10-27 10:57:01 -07002688 /* Any of the above functions may drop and re-acquire the lock, so check
2689 * to make sure a watchdog timer didn't mark the host as non-responsive.
2690 */
2691 if (xhci->xhc_state & XHCI_STATE_DYING) {
2692 xhci_dbg(xhci, "xHCI host dying, returning from "
2693 "event handler.\n");
Matt Evans9dee9a22011-03-29 13:41:02 +11002694 return 0;
Sarah Sharp6f5165c2009-10-27 10:57:01 -07002695 }
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002696
Sarah Sharpc06d68b2010-07-29 22:12:49 -07002697 if (update_ptrs)
2698 /* Update SW event ring dequeue pointer */
Andiry Xu3b72fca2012-03-05 17:49:32 +08002699 inc_deq(xhci, xhci->event_ring);
Sarah Sharpc06d68b2010-07-29 22:12:49 -07002700
Matt Evans9dee9a22011-03-29 13:41:02 +11002701 /* Are there more items on the event ring? Caller will call us again to
2702 * check.
2703 */
2704 return 1;
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002705}
Sarah Sharp9032cd52010-07-29 22:12:29 -07002706
2707/*
2708 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
2709 * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
2710 * indicators of an event TRB error, but we check the status *first* to be safe.
2711 */
2712irqreturn_t xhci_irq(struct usb_hcd *hcd)
2713{
2714 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
Sarah Sharpc06d68b2010-07-29 22:12:49 -07002715 union xhci_trb *event_ring_deq;
Felipe Balbi76a35292017-01-23 14:20:07 +02002716 irqreturn_t ret = IRQ_NONE;
Alan Stern63aea0d2017-05-17 18:32:03 +03002717 unsigned long flags;
Sarah Sharpc06d68b2010-07-29 22:12:49 -07002718 dma_addr_t deq;
Felipe Balbi76a35292017-01-23 14:20:07 +02002719 u64 temp_64;
2720 u32 status;
Sarah Sharp9032cd52010-07-29 22:12:29 -07002721
Alan Stern63aea0d2017-05-17 18:32:03 +03002722 spin_lock_irqsave(&xhci->lock, flags);
Sarah Sharp9032cd52010-07-29 22:12:29 -07002723 /* Check if the xHC generated the interrupt, or the irq is shared */
Xenia Ragiadakoub0ba9722013-11-15 05:34:06 +02002724 status = readl(&xhci->op_regs->status);
Mathias Nymand9f11ba2017-04-07 17:57:01 +03002725 if (status == ~(u32)0) {
2726 xhci_hc_died(xhci);
Felipe Balbi76a35292017-01-23 14:20:07 +02002727 ret = IRQ_HANDLED;
2728 goto out;
Sarah Sharp9032cd52010-07-29 22:12:29 -07002729 }
Felipe Balbi76a35292017-01-23 14:20:07 +02002730
2731 if (!(status & STS_EINT))
2732 goto out;
2733
Sarah Sharp27e0dd42010-07-29 22:12:43 -07002734 if (status & STS_FATAL) {
Sarah Sharp9032cd52010-07-29 22:12:29 -07002735 xhci_warn(xhci, "WARNING: Host System Error\n");
2736 xhci_halt(xhci);
Felipe Balbi76a35292017-01-23 14:20:07 +02002737 ret = IRQ_HANDLED;
2738 goto out;
Sarah Sharp9032cd52010-07-29 22:12:29 -07002739 }
2740
Sarah Sharpbda53142010-07-29 22:12:38 -07002741 /*
2742 * Clear the op reg interrupt status first,
2743 * so we can receive interrupts from other MSI-X interrupters.
2744 * Write 1 to clear the interrupt status.
2745 */
Sarah Sharp27e0dd42010-07-29 22:12:43 -07002746 status |= STS_EINT;
Xenia Ragiadakou204b7792013-11-15 05:34:07 +02002747 writel(status, &xhci->op_regs->status);
Sarah Sharpbda53142010-07-29 22:12:38 -07002748
Peter Chen6a29bee2017-05-17 18:32:02 +03002749 if (!hcd->msi_enabled) {
Sarah Sharpc21599a2010-07-29 22:13:00 -07002750 u32 irq_pending;
Xenia Ragiadakoub0ba9722013-11-15 05:34:06 +02002751 irq_pending = readl(&xhci->ir_set->irq_pending);
Felipe Balbi4e833c02012-03-15 16:37:08 +02002752 irq_pending |= IMAN_IP;
Xenia Ragiadakou204b7792013-11-15 05:34:07 +02002753 writel(irq_pending, &xhci->ir_set->irq_pending);
Sarah Sharpc21599a2010-07-29 22:13:00 -07002754 }
Sarah Sharpbda53142010-07-29 22:12:38 -07002755
Gabriel Krisman Bertazi27a41a82016-06-01 18:09:07 +03002756 if (xhci->xhc_state & XHCI_STATE_DYING ||
2757 xhci->xhc_state & XHCI_STATE_HALTED) {
Sarah Sharpbda53142010-07-29 22:12:38 -07002758 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
2759 "Shouldn't IRQs be disabled?\n");
Sarah Sharpc06d68b2010-07-29 22:12:49 -07002760 /* Clear the event handler busy flag (RW1C);
2761 * the event ring should be empty.
Sarah Sharpbda53142010-07-29 22:12:38 -07002762 */
Sarah Sharpf7b2e402014-01-30 13:27:49 -08002763 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
Sarah Sharp477632d2014-01-29 14:02:00 -08002764 xhci_write_64(xhci, temp_64 | ERST_EHB,
2765 &xhci->ir_set->erst_dequeue);
Felipe Balbi76a35292017-01-23 14:20:07 +02002766 ret = IRQ_HANDLED;
2767 goto out;
Sarah Sharpc06d68b2010-07-29 22:12:49 -07002768 }
2769
2770 event_ring_deq = xhci->event_ring->dequeue;
2771 /* FIXME this should be a delayed service routine
2772 * that clears the EHB.
2773 */
Matt Evans9dee9a22011-03-29 13:41:02 +11002774 while (xhci_handle_event(xhci) > 0) {}
Sarah Sharpc06d68b2010-07-29 22:12:49 -07002775
Sarah Sharpf7b2e402014-01-30 13:27:49 -08002776 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
Sarah Sharpc06d68b2010-07-29 22:12:49 -07002777 /* If necessary, update the HW's version of the event ring deq ptr. */
2778 if (event_ring_deq != xhci->event_ring->dequeue) {
2779 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2780 xhci->event_ring->dequeue);
2781 if (deq == 0)
2782 xhci_warn(xhci, "WARN something wrong with SW event "
2783 "ring dequeue ptr.\n");
2784 /* Update HC event ring dequeue pointer */
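		/* low bits of erst_dequeue hold the DESI/EHB flags; keep them as read */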
2785 temp_64 &= ERST_PTR_MASK;
2786 temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
2787 }
Sarah Sharpbda53142010-07-29 22:12:38 -07002788
2789 /* Clear the event handler busy flag (RW1C); event ring is empty. */
Sarah Sharpc06d68b2010-07-29 22:12:49 -07002790 temp_64 |= ERST_EHB;
Sarah Sharp477632d2014-01-29 14:02:00 -08002791 xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
Felipe Balbi76a35292017-01-23 14:20:07 +02002792 ret = IRQ_HANDLED;
Sarah Sharpc06d68b2010-07-29 22:12:49 -07002793
Felipe Balbi76a35292017-01-23 14:20:07 +02002794out:
Alan Stern63aea0d2017-05-17 18:32:03 +03002795 spin_unlock_irqrestore(&xhci->lock, flags);
Sarah Sharp9032cd52010-07-29 22:12:29 -07002796
Felipe Balbi76a35292017-01-23 14:20:07 +02002797 return ret;
Sarah Sharp9032cd52010-07-29 22:12:29 -07002798}
2799
Alex Shi851ec162013-05-24 10:54:19 +08002800irqreturn_t xhci_msi_irq(int irq, void *hcd)
Sarah Sharp9032cd52010-07-29 22:12:29 -07002801{
Alan Stern968b8222011-11-03 12:03:38 -04002802 return xhci_irq(hcd);
Sarah Sharp9032cd52010-07-29 22:12:29 -07002803}
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002804
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002805/**** Endpoint Ring Operations ****/
2806
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002807/*
2808 * Generic function for queueing a TRB on a ring.
2809 * The caller must have checked to make sure there's room on the ring.
Sarah Sharp6cc30d82010-06-10 12:25:28 -07002810 *
2811 * @more_trbs_coming: Will you enqueue more TRBs before calling
2812 * prepare_transfer()?
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002813 */
2814static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
Andiry Xu3b72fca2012-03-05 17:49:32 +08002815 bool more_trbs_coming,
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002816 u32 field1, u32 field2, u32 field3, u32 field4)
2817{
2818 struct xhci_generic_trb *trb;
2819
2820 trb = &ring->enqueue->generic;
Matt Evans28ccd292011-03-29 13:40:46 +11002821 trb->field[0] = cpu_to_le32(field1);
2822 trb->field[1] = cpu_to_le32(field2);
2823 trb->field[2] = cpu_to_le32(field3);
2824 trb->field[3] = cpu_to_le32(field4);
Felipe Balbia37c3f72017-01-23 14:20:19 +02002825
2826 trace_xhci_queue_trb(ring, trb);
2827
Andiry Xu3b72fca2012-03-05 17:49:32 +08002828 inc_enq(xhci, ring, more_trbs_coming);
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002829}
2830
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002831/*
2832 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
2833 * FIXME allocate segments if the ring is full.
2834 */
2835static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
Andiry Xu3b72fca2012-03-05 17:49:32 +08002836 u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002837{
Andiry Xu8dfec612012-03-05 17:49:37 +08002838 unsigned int num_trbs_needed;
2839
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002840 /* Make sure the endpoint has been added to xHC schedule */
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002841 switch (ep_state) {
2842 case EP_STATE_DISABLED:
2843 /*
2844 * USB core changed config/interfaces without notifying us,
2845 * or hardware is reporting the wrong state.
2846 */
2847 xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
2848 return -ENOENT;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002849 case EP_STATE_ERROR:
Sarah Sharpc92bcfa2009-07-27 12:05:21 -07002850 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002851 /* FIXME event handling code for error needs to clear it */
2852 /* XXX not sure if this should be -ENOENT or not */
2853 return -EINVAL;
Sarah Sharpc92bcfa2009-07-27 12:05:21 -07002854 case EP_STATE_HALTED:
2855 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
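		/* fall through */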
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002856 case EP_STATE_STOPPED:
2857 case EP_STATE_RUNNING:
2858 break;
2859 default:
2860 xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
2861 /*
2862 * FIXME issue Configure Endpoint command to try to get the HC
2863 * back into a known state.
2864 */
2865 return -EINVAL;
2866 }
Andiry Xu8dfec612012-03-05 17:49:37 +08002867
2868 while (1) {
Sarah Sharp3d4b81e2014-01-31 11:52:57 -08002869 if (room_on_ring(xhci, ep_ring, num_trbs))
2870 break;
Andiry Xu8dfec612012-03-05 17:49:37 +08002871
2872 if (ep_ring == xhci->cmd_ring) {
2873 xhci_err(xhci, "Do not support expand command ring\n");
2874 return -ENOMEM;
2875 }
2876
Xenia Ragiadakou68ffb012013-08-14 06:33:56 +03002877 xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
2878 "ERROR no room on ep ring, try ring expansion");
Andiry Xu8dfec612012-03-05 17:49:37 +08002879 num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
2880 if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
2881 mem_flags)) {
2882 xhci_err(xhci, "Ring expansion failed\n");
2883 return -ENOMEM;
2884 }
Peter Senna Tschudin261fa122012-09-12 19:03:17 +02002885 }
John Youn6c12db92010-05-10 15:33:00 -07002886
Mathias Nymand0c77d82016-06-21 10:58:07 +03002887 while (trb_is_link(ep_ring->enqueue)) {
2888 /* If we're not dealing with 0.95 hardware or isoc rings
2889 * on AMD 0.96 host, clear the chain bit.
2890 */
2891 if (!xhci_link_trb_quirk(xhci) &&
2892 !(ep_ring->type == TYPE_ISOC &&
2893 (xhci->quirks & XHCI_AMD_0x96_HOST)))
2894 ep_ring->enqueue->link.control &=
2895 cpu_to_le32(~TRB_CHAIN);
2896 else
2897 ep_ring->enqueue->link.control |=
2898 cpu_to_le32(TRB_CHAIN);
John Youn6c12db92010-05-10 15:33:00 -07002899
Mathias Nymand0c77d82016-06-21 10:58:07 +03002900 wmb();
2901 ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
John Youn6c12db92010-05-10 15:33:00 -07002902
Mathias Nymand0c77d82016-06-21 10:58:07 +03002903 /* Toggle the cycle bit after the last ring segment. */
2904 if (link_trb_toggles_cycle(ep_ring->enqueue))
2905 ep_ring->cycle_state ^= 1;
John Youn6c12db92010-05-10 15:33:00 -07002906
Mathias Nymand0c77d82016-06-21 10:58:07 +03002907 ep_ring->enq_seg = ep_ring->enq_seg->next;
2908 ep_ring->enqueue = ep_ring->enq_seg->trbs;
John Youn6c12db92010-05-10 15:33:00 -07002909 }
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002910 return 0;
2911}
2912
Sarah Sharp23e3be12009-04-29 19:05:20 -07002913static int prepare_transfer(struct xhci_hcd *xhci,
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002914 struct xhci_virt_device *xdev,
2915 unsigned int ep_index,
Sarah Sharpe9df17e2010-04-02 15:34:43 -07002916 unsigned int stream_id,
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002917 unsigned int num_trbs,
2918 struct urb *urb,
Andiry Xu8e51adc2010-07-22 15:23:31 -07002919 unsigned int td_index,
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002920 gfp_t mem_flags)
2921{
2922 int ret;
Andiry Xu8e51adc2010-07-22 15:23:31 -07002923 struct urb_priv *urb_priv;
2924 struct xhci_td *td;
Sarah Sharpe9df17e2010-04-02 15:34:43 -07002925 struct xhci_ring *ep_ring;
John Yound115b042009-07-27 12:05:15 -07002926 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
Sarah Sharpe9df17e2010-04-02 15:34:43 -07002927
2928 ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
2929 if (!ep_ring) {
2930 xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
2931 stream_id);
2932 return -EINVAL;
2933 }
2934
Mathias Nyman5071e6b2016-11-11 15:13:28 +02002935 ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
Andiry Xu3b72fca2012-03-05 17:49:32 +08002936 num_trbs, mem_flags);
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002937 if (ret)
2938 return ret;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002939
Andiry Xu8e51adc2010-07-22 15:23:31 -07002940 urb_priv = urb->hcpriv;
Mathias Nyman7e64b032017-01-23 14:20:26 +02002941 td = &urb_priv->td[td_index];
Andiry Xu8e51adc2010-07-22 15:23:31 -07002942
2943 INIT_LIST_HEAD(&td->td_list);
2944 INIT_LIST_HEAD(&td->cancelled_td_list);
2945
2946 if (td_index == 0) {
Sarah Sharp214f76f2010-10-26 11:22:02 -07002947 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
Sarah Sharpd13565c2011-07-22 14:34:34 -07002948 if (unlikely(ret))
Andiry Xu8e51adc2010-07-22 15:23:31 -07002949 return ret;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002950 }
2951
Andiry Xu8e51adc2010-07-22 15:23:31 -07002952 td->urb = urb;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002953 /* Add this TD to the tail of the endpoint ring's TD list */
Andiry Xu8e51adc2010-07-22 15:23:31 -07002954 list_add_tail(&td->td_list, &ep_ring->td_list);
2955 td->start_seg = ep_ring->enq_seg;
2956 td->first_trb = ep_ring->enqueue;
2957
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002958 return 0;
2959}
2960
Lu Baolu67d2ea92017-12-08 17:59:09 +02002961unsigned int count_trbs(u64 addr, u64 len)
Sarah Sharp8a96c052009-04-27 19:59:19 -07002962{
Alexandr Ivanovd2510342016-04-22 13:17:09 +03002963 unsigned int num_trbs;
Sarah Sharp8a96c052009-04-27 19:59:19 -07002964
Alexandr Ivanovd2510342016-04-22 13:17:09 +03002965 num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
2966 TRB_MAX_BUFF_SIZE);
2967 if (num_trbs == 0)
2968 num_trbs++;
Sarah Sharp8a96c052009-04-27 19:59:19 -07002969
Sarah Sharp8a96c052009-04-27 19:59:19 -07002970 return num_trbs;
2971}
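/*
 * Worked example for count_trbs() (hypothetical values, for
 * illustration only): a 1000-byte buffer at addr 0x1FF00 sits
 * 0xFF00 (65280) bytes into its 64KB region, so
 * DIV_ROUND_UP(1000 + 65280, 65536) = 2 TRBs: 256 bytes up to the
 * boundary in the first, the remaining 744 bytes in the second.
 */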
2972
Alexandr Ivanovd2510342016-04-22 13:17:09 +03002973static inline unsigned int count_trbs_needed(struct urb *urb)
Sarah Sharp8a96c052009-04-27 19:59:19 -07002974{
Alexandr Ivanovd2510342016-04-22 13:17:09 +03002975 return count_trbs(urb->transfer_dma, urb->transfer_buffer_length);
2976}
2977
2978static unsigned int count_sg_trbs_needed(struct urb *urb)
2979{
2980 struct scatterlist *sg;
2981 unsigned int i, len, full_len, num_trbs = 0;
2982
2983 full_len = urb->transfer_buffer_length;
2984
2985 for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
2986 len = sg_dma_len(sg);
2987 num_trbs += count_trbs(sg_dma_address(sg), len);
2988 len = min_t(unsigned int, len, full_len);
2989 full_len -= len;
2990 if (full_len == 0)
2991 break;
2992 }
2993
2994 return num_trbs;
2995}
2996
2997static unsigned int count_isoc_trbs_needed(struct urb *urb, int i)
2998{
2999 u64 addr, len;
3000
3001 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
3002 len = urb->iso_frame_desc[i].length;
3003
3004 return count_trbs(addr, len);
3005}
3006
3007static void check_trb_math(struct urb *urb, int running_total)
3008{
3009 if (unlikely(running_total != urb->transfer_buffer_length))
Paul Zimmermana2490182011-02-12 14:06:44 -08003010 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
Sarah Sharp8a96c052009-04-27 19:59:19 -07003011 "queued %#x (%d), asked for %#x (%d)\n",
3012 __func__,
3013 urb->ep->desc.bEndpointAddress,
3014 running_total, running_total,
3015 urb->transfer_buffer_length,
3016 urb->transfer_buffer_length);
3017}
3018
Sarah Sharp23e3be12009-04-29 19:05:20 -07003019static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
Sarah Sharpe9df17e2010-04-02 15:34:43 -07003020 unsigned int ep_index, unsigned int stream_id, int start_cycle,
Andiry Xue1eab2e2011-01-04 16:30:39 -08003021 struct xhci_generic_trb *start_trb)
Sarah Sharp8a96c052009-04-27 19:59:19 -07003022{
Sarah Sharp8a96c052009-04-27 19:59:19 -07003023 /*
3024 * Pass all the TRBs to the hardware at once and make sure this write
3025 * isn't reordered.
3026 */
3027 wmb();
Andiry Xu50f7b522010-12-20 15:09:34 +08003028 if (start_cycle)
Matt Evans28ccd292011-03-29 13:40:46 +11003029 start_trb->field[3] |= cpu_to_le32(start_cycle);
Andiry Xu50f7b522010-12-20 15:09:34 +08003030 else
Matt Evans28ccd292011-03-29 13:40:46 +11003031 start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
Andiry Xube88fe42010-10-14 07:22:57 -07003032 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
Sarah Sharp8a96c052009-04-27 19:59:19 -07003033}
3034
Alexandr Ivanov78140152016-04-22 13:17:11 +03003035static void check_interval(struct xhci_hcd *xhci, struct urb *urb,
3036 struct xhci_ep_ctx *ep_ctx)
Sarah Sharp624defa2009-09-02 12:14:28 -07003037{
Sarah Sharp624defa2009-09-02 12:14:28 -07003038 int xhci_interval;
3039 int ep_interval;
3040
Matt Evans28ccd292011-03-29 13:40:46 +11003041 xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
Sarah Sharp624defa2009-09-02 12:14:28 -07003042 ep_interval = urb->interval;
Alexandr Ivanov78140152016-04-22 13:17:11 +03003043
Sarah Sharp624defa2009-09-02 12:14:28 -07003044 /* Convert to microframes */
3045 if (urb->dev->speed == USB_SPEED_LOW ||
3046 urb->dev->speed == USB_SPEED_FULL)
3047 ep_interval *= 8;
Alexandr Ivanov78140152016-04-22 13:17:11 +03003048
Sarah Sharp624defa2009-09-02 12:14:28 -07003049 /* FIXME change this to a warning and a suggestion to use the new API
3050 * to set the polling interval (once the API is added).
3051 */
3052 if (xhci_interval != ep_interval) {
Dmitry Kasatkin0730d522013-08-27 17:47:35 +03003053 dev_dbg_ratelimited(&urb->dev->dev,
3054 "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
3055 ep_interval, ep_interval == 1 ? "" : "s",
3056 xhci_interval, xhci_interval == 1 ? "" : "s");
Sarah Sharp624defa2009-09-02 12:14:28 -07003057 urb->interval = xhci_interval;
3058 /* Convert back to frames for LS/FS devices */
3059 if (urb->dev->speed == USB_SPEED_LOW ||
3060 urb->dev->speed == USB_SPEED_FULL)
3061 urb->interval /= 8;
3062 }
Alexandr Ivanov78140152016-04-22 13:17:11 +03003063}
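/*
 * Worked example for check_interval() (hypothetical values, for
 * illustration only): a full-speed device asking for urb->interval =
 * 32 frames is converted to 256 microframes; if the endpoint context
 * encodes 128 microframes instead, urb->interval is overridden to 128
 * and converted back to 128 / 8 = 16 frames.
 */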
3064
3065/*
3066 * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt
3067 * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD
3068 * (comprised of sg list entries) can take several service intervals to
3069 * transmit.
3070 */
3071int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3072 struct urb *urb, int slot_id, unsigned int ep_index)
3073{
3074 struct xhci_ep_ctx *ep_ctx;
3075
3076 ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index);
3077 check_interval(xhci, urb, ep_ctx);
3078
Dan Carpenter3fc82062012-03-28 10:30:26 +03003079 return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
Sarah Sharp624defa2009-09-02 12:14:28 -07003080}
3081
Sarah Sharp04dd9502009-11-11 10:28:30 -08003082/*
Sarah Sharp4525c0a2012-10-25 15:56:40 -07003083 * For xHCI 1.0 host controllers, TD size is the number of max packet sized
3084 * packets remaining in the TD (*not* including this TRB).
Sarah Sharp4da6e6f2011-04-01 14:01:30 -07003085 *
3086 * Total TD packet count = total_packet_count =
Sarah Sharp4525c0a2012-10-25 15:56:40 -07003087 * DIV_ROUND_UP(TD size in bytes / wMaxPacketSize)
Sarah Sharp4da6e6f2011-04-01 14:01:30 -07003088 *
3089 * Packets transferred up to and including this TRB = packets_transferred =
3090 * rounddown(total bytes transferred including this TRB / wMaxPacketSize)
3091 *
3092 * TD size = total_packet_count - packets_transferred
3093 *
Mathias Nymanc840d6c2015-10-09 13:30:08 +03003094 * For xHCI 0.96 and older, TD size field should be the remaining bytes
3095 * including this TRB, right shifted by 10
3096 *
3097 * For all hosts it must fit in bits 21:17, so it can't be bigger than 31.
3098 * This is taken care of in the TRB_TD_SIZE() macro
3099 *
Sarah Sharp4525c0a2012-10-25 15:56:40 -07003100 * The last TRB in a TD must have the TD size set to zero.
Sarah Sharp4da6e6f2011-04-01 14:01:30 -07003101 */
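/*
 * Worked example of the xHCI 1.0+ rule above (hypothetical values,
 * for illustration only): with wMaxPacketSize = 512 and td_total_len =
 * 1600, total_packet_count = DIV_ROUND_UP(1600, 512) = 4. After a
 * first 1024-byte TRB, packets_transferred = 1024 / 512 = 2, so that
 * TRB's TD size field is 4 - 2 = 2; the TRB ending the TD reports 0.
 */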
Mathias Nymanc840d6c2015-10-09 13:30:08 +03003102static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
3103 int trb_buff_len, unsigned int td_total_len,
Mathias Nyman124c3932016-06-21 10:57:59 +03003104 struct urb *urb, bool more_trbs_coming)
Sarah Sharp4da6e6f2011-04-01 14:01:30 -07003105{
Mathias Nymanc840d6c2015-10-09 13:30:08 +03003106 u32 maxp, total_packet_count;
3107
Chunfeng Yun72b663a2017-12-08 18:10:06 +02003108 /* MTK xHCI 0.96 contains some features from 1.0 */
Chunfeng Yun0cbd4b32015-11-24 13:09:55 +02003109 if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
Mathias Nymanc840d6c2015-10-09 13:30:08 +03003110 return ((td_total_len - transferred) >> 10);
3111
Sarah Sharp48df4a62011-08-12 10:23:01 -07003112 /* One TRB with a zero-length data packet. */
Mathias Nyman124c3932016-06-21 10:57:59 +03003113 if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
Mathias Nymanc840d6c2015-10-09 13:30:08 +03003114 trb_buff_len == td_total_len)
Sarah Sharp48df4a62011-08-12 10:23:01 -07003115 return 0;
3116
Chunfeng Yun72b663a2017-12-08 18:10:06 +02003117	/* for MTK xHCI 0.96, TD size includes this TRB, but not in 1.x */
3118 if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100))
Chunfeng Yun0cbd4b32015-11-24 13:09:55 +02003119 trb_buff_len = 0;
3120
Felipe Balbi734d3dd2016-09-28 13:46:37 +03003121 maxp = usb_endpoint_maxp(&urb->ep->desc);
Chunfeng Yun0cbd4b32015-11-24 13:09:55 +02003122 total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
3123
Mathias Nymanc840d6c2015-10-09 13:30:08 +03003124 /* Queueing functions don't count the current TRB into transferred */
3125 return (total_packet_count - ((transferred + trb_buff_len) / maxp));
Sarah Sharp4da6e6f2011-04-01 14:01:30 -07003126}
3127
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003128
Mathias Nyman474ed232016-06-21 10:58:01 +03003129static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003130 u32 *trb_buff_len, struct xhci_segment *seg)
Mathias Nyman474ed232016-06-21 10:58:01 +03003131{
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003132 struct device *dev = xhci_to_hcd(xhci)->self.controller;
Mathias Nyman474ed232016-06-21 10:58:01 +03003133 unsigned int unalign;
3134 unsigned int max_pkt;
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003135 u32 new_buff_len;
Mathias Nyman474ed232016-06-21 10:58:01 +03003136
Felipe Balbi734d3dd2016-09-28 13:46:37 +03003137 max_pkt = usb_endpoint_maxp(&urb->ep->desc);
Mathias Nyman474ed232016-06-21 10:58:01 +03003138 unalign = (enqd_len + *trb_buff_len) % max_pkt;
3139
3140 /* we got lucky, last normal TRB data on segment is packet aligned */
3141 if (unalign == 0)
3142 return 0;
3143
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003144 xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n",
3145 unalign, *trb_buff_len);
3146
Mathias Nyman474ed232016-06-21 10:58:01 +03003147	/* is the last normal TRB alignable by splitting it? */
3148 if (*trb_buff_len > unalign) {
3149 *trb_buff_len -= unalign;
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003150 xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len);
Mathias Nyman474ed232016-06-21 10:58:01 +03003151 return 0;
3152 }
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003153
3154 /*
3155	 * We want enqd_len + trb_buff_len to sum up to a number which is
3156	 * divisible by the endpoint's wMaxPacketSize. IOW:
3157 * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
3158 */
3159 new_buff_len = max_pkt - (enqd_len % max_pkt);
3160
3161 if (new_buff_len > (urb->transfer_buffer_length - enqd_len))
3162 new_buff_len = (urb->transfer_buffer_length - enqd_len);
3163
3164 /* create a max max_pkt sized bounce buffer pointed to by last trb */
3165 if (usb_urb_dir_out(urb)) {
3166 sg_pcopy_to_buffer(urb->sg, urb->num_mapped_sgs,
3167 seg->bounce_buf, new_buff_len, enqd_len);
3168 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
3169 max_pkt, DMA_TO_DEVICE);
3170 } else {
3171 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
3172 max_pkt, DMA_FROM_DEVICE);
3173 }
3174
3175 if (dma_mapping_error(dev, seg->bounce_dma)) {
3176 /* try without aligning. Some host controllers survive */
3177 xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n");
3178 return 0;
3179 }
3180 *trb_buff_len = new_buff_len;
3181 seg->bounce_len = new_buff_len;
3182 seg->bounce_offs = enqd_len;
3183
3184 xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len);
3185
Mathias Nyman474ed232016-06-21 10:58:01 +03003186 return 1;
3187}
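/*
 * Worked example for xhci_align_td() (hypothetical values, for
 * illustration only): with max_pkt = 512, enqd_len = 700 and
 * trb_buff_len = 200, unalign = (700 + 200) % 512 = 388. The TRB is
 * too short to split (200 < 388), so the bounce buffer takes
 * new_buff_len = 512 - (700 % 512) = 324 bytes, making 700 + 324 =
 * 1024 a whole multiple of wMaxPacketSize.
 */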
3188
Sarah Sharpb10de142009-04-27 19:58:50 -07003189/* This is very similar to what ehci-q.c qtd_fill() does */
Sarah Sharp23e3be12009-04-29 19:05:20 -07003190int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
Sarah Sharpb10de142009-04-27 19:58:50 -07003191 struct urb *urb, int slot_id, unsigned int ep_index)
3192{
Mathias Nyman5a5a0b12016-06-21 10:57:57 +03003193 struct xhci_ring *ring;
Andiry Xu8e51adc2010-07-22 15:23:31 -07003194 struct urb_priv *urb_priv;
Sarah Sharpb10de142009-04-27 19:58:50 -07003195 struct xhci_td *td;
Sarah Sharpb10de142009-04-27 19:58:50 -07003196 struct xhci_generic_trb *start_trb;
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003197 struct scatterlist *sg = NULL;
Mathias Nyman5a83f042016-06-21 10:57:58 +03003198 bool more_trbs_coming = true;
3199 bool need_zero_pkt = false;
Mathias Nyman86065c22016-06-21 10:58:00 +03003200 bool first_trb = true;
3201 unsigned int num_trbs;
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003202 unsigned int start_cycle, num_sgs = 0;
Mathias Nyman86065c22016-06-21 10:58:00 +03003203 unsigned int enqd_len, block_len, trb_buff_len, full_len;
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003204 int sent_len, ret;
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003205 u32 field, length_field, remainder;
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003206 u64 addr, send_addr;
Sarah Sharpb10de142009-04-27 19:58:50 -07003207
Mathias Nyman5a5a0b12016-06-21 10:57:57 +03003208 ring = xhci_urb_to_transfer_ring(xhci, urb);
3209 if (!ring)
Sarah Sharpe9df17e2010-04-02 15:34:43 -07003210 return -EINVAL;
Sarah Sharpb10de142009-04-27 19:58:50 -07003211
Mathias Nyman86065c22016-06-21 10:58:00 +03003212 full_len = urb->transfer_buffer_length;
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003213	/* If we have a scatter/gather list, use it. */
3214 if (urb->num_sgs) {
3215 num_sgs = urb->num_mapped_sgs;
3216 sg = urb->sg;
Mathias Nyman86065c22016-06-21 10:58:00 +03003217 addr = (u64) sg_dma_address(sg);
3218 block_len = sg_dma_len(sg);
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003219 num_trbs = count_sg_trbs_needed(urb);
Mathias Nyman86065c22016-06-21 10:58:00 +03003220 } else {
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003221 num_trbs = count_trbs_needed(urb);
Mathias Nyman86065c22016-06-21 10:58:00 +03003222 addr = (u64) urb->transfer_dma;
3223 block_len = full_len;
3224 }
Sarah Sharpe9df17e2010-04-02 15:34:43 -07003225 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3226 ep_index, urb->stream_id,
Andiry Xu3b72fca2012-03-05 17:49:32 +08003227 num_trbs, urb, 0, mem_flags);
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003228 if (unlikely(ret < 0))
Sarah Sharpb10de142009-04-27 19:58:50 -07003229 return ret;
3230
Andiry Xu8e51adc2010-07-22 15:23:31 -07003231 urb_priv = urb->hcpriv;
Reyad Attiyat4758dcd2015-08-06 19:23:58 +03003232
3233 /* Deal with URB_ZERO_PACKET - need one more td/trb */
Mathias Nyman9ef7fbb2017-01-23 14:20:25 +02003234 if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->num_tds > 1)
Mathias Nyman5a83f042016-06-21 10:57:58 +03003235 need_zero_pkt = true;
Reyad Attiyat4758dcd2015-08-06 19:23:58 +03003236
Mathias Nyman7e64b032017-01-23 14:20:26 +02003237 td = &urb_priv->td[0];
Andiry Xu8e51adc2010-07-22 15:23:31 -07003238
Sarah Sharpb10de142009-04-27 19:58:50 -07003239 /*
3240 * Don't give the first TRB to the hardware (by toggling the cycle bit)
3241 * until we've finished creating all the other TRBs. The ring's cycle
3242 * state may change as we enqueue the other TRBs, so save it too.
3243 */
Mathias Nyman5a5a0b12016-06-21 10:57:57 +03003244 start_trb = &ring->enqueue->generic;
3245 start_cycle = ring->cycle_state;
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003246 send_addr = addr;
Sarah Sharpb10de142009-04-27 19:58:50 -07003247
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003248 /* Queue the TRBs, even if they are zero-length */
Alban Browaeys0d2daad2016-08-16 10:18:04 +03003249 for (enqd_len = 0; first_trb || enqd_len < full_len;
3250 enqd_len += trb_buff_len) {
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003251 field = TRB_TYPE(TRB_NORMAL);
3252
Mathias Nyman86065c22016-06-21 10:58:00 +03003253 /* TRB buffer should not cross 64KB boundaries */
3254 trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
3255 trb_buff_len = min_t(unsigned int, trb_buff_len, block_len);
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003256
Mathias Nyman86065c22016-06-21 10:58:00 +03003257 if (enqd_len + trb_buff_len > full_len)
3258 trb_buff_len = full_len - enqd_len;
Sarah Sharpb10de142009-04-27 19:58:50 -07003259
3260 /* Don't change the cycle bit of the first TRB until later */
Mathias Nyman86065c22016-06-21 10:58:00 +03003261 if (first_trb) {
3262 first_trb = false;
Andiry Xu50f7b522010-12-20 15:09:34 +08003263 if (start_cycle == 0)
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003264 field |= TRB_CYCLE;
Andiry Xu50f7b522010-12-20 15:09:34 +08003265 } else
Mathias Nyman5a5a0b12016-06-21 10:57:57 +03003266 field |= ring->cycle_state;
Sarah Sharpb10de142009-04-27 19:58:50 -07003267
3268 /* Chain all the TRBs together; clear the chain bit in the last
3269 * TRB to indicate it's the last TRB in the chain.
3270 */
Mathias Nyman86065c22016-06-21 10:58:00 +03003271 if (enqd_len + trb_buff_len < full_len) {
Sarah Sharpb10de142009-04-27 19:58:50 -07003272 field |= TRB_CHAIN;
Mathias Nyman2d98ef42016-06-21 10:58:04 +03003273 if (trb_is_link(ring->enqueue + 1)) {
Mathias Nyman474ed232016-06-21 10:58:01 +03003274 if (xhci_align_td(xhci, urb, enqd_len,
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003275 &trb_buff_len,
3276 ring->enq_seg)) {
3277 send_addr = ring->enq_seg->bounce_dma;
3278 /* assuming TD won't span 2 segs */
3279 td->bounce_seg = ring->enq_seg;
3280 }
Mathias Nyman474ed232016-06-21 10:58:01 +03003281 }
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003282 }
3283 if (enqd_len + trb_buff_len >= full_len) {
3284 field &= ~TRB_CHAIN;
Sarah Sharpb10de142009-04-27 19:58:50 -07003285 field |= TRB_IOC;
Mathias Nyman124c3932016-06-21 10:57:59 +03003286 more_trbs_coming = false;
Mathias Nyman5a83f042016-06-21 10:57:58 +03003287 td->last_trb = ring->enqueue;
Sarah Sharpb10de142009-04-27 19:58:50 -07003288 }
Sarah Sharpaf8b9e62011-03-23 16:26:26 -07003289
3290 /* Only set interrupt on short packet for IN endpoints */
3291 if (usb_urb_dir_in(urb))
3292 field |= TRB_ISP;
3293
Sarah Sharp4da6e6f2011-04-01 14:01:30 -07003294 /* Set the TRB length, TD size, and interrupter fields. */
Mathias Nyman86065c22016-06-21 10:58:00 +03003295 remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
3296 full_len, urb, more_trbs_coming);
3297
Sarah Sharpf9dc68f2009-07-27 12:03:07 -07003298 length_field = TRB_LEN(trb_buff_len) |
Mathias Nymanc840d6c2015-10-09 13:30:08 +03003299 TRB_TD_SIZE(remainder) |
Sarah Sharpf9dc68f2009-07-27 12:03:07 -07003300 TRB_INTR_TARGET(0);
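		/*
		 * Worked example (hedged annotation): on xHCI 1.0+ hosts,
		 * xhci_td_remainder() is expected to return the number of
		 * wMaxPacketSize packets still to be sent after this TRB,
		 * with the TD size field capped at 31 (xHCI spec 4.11.2.4).
		 * E.g. for an 8192-byte TD on a 512-byte endpoint, after a
		 * 1024-byte first TRB the field is (8192 - 1024) / 512 = 14;
		 * it is 0 in the last TRB of the TD.
		 */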
Sarah Sharp4da6e6f2011-04-01 14:01:30 -07003301
Mathias Nyman124c3932016-06-21 10:57:59 +03003302 queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt,
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003303 lower_32_bits(send_addr),
3304 upper_32_bits(send_addr),
Sarah Sharpf9dc68f2009-07-27 12:03:07 -07003305 length_field,
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003306 field);
3307
Sarah Sharpb10de142009-04-27 19:58:50 -07003308 addr += trb_buff_len;
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003309 sent_len = trb_buff_len;
Sarah Sharpb10de142009-04-27 19:58:50 -07003310
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003311 while (sg && sent_len >= block_len) {
Mathias Nyman86065c22016-06-21 10:58:00 +03003312 /* New sg entry */
3313 --num_sgs;
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003314 sent_len -= block_len;
Mathias Nyman86065c22016-06-21 10:58:00 +03003315 if (num_sgs != 0) {
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003316 sg = sg_next(sg);
Mathias Nyman86065c22016-06-21 10:58:00 +03003317 block_len = sg_dma_len(sg);
3318 addr = (u64) sg_dma_address(sg);
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003319 addr += sent_len;
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003320 }
3321 }
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003322 block_len -= sent_len;
3323 send_addr = addr;
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003324 }
3325
Mathias Nyman5a83f042016-06-21 10:57:58 +03003326 if (need_zero_pkt) {
3327 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3328 ep_index, urb->stream_id,
3329 1, urb, 1, mem_flags);
Mathias Nyman7e64b032017-01-23 14:20:26 +02003330 urb_priv->td[1].last_trb = ring->enqueue;
Mathias Nyman5a83f042016-06-21 10:57:58 +03003331 field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
3332 queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
3333 }
3334
Mathias Nyman86065c22016-06-21 10:58:00 +03003335 check_trb_math(urb, enqd_len);
Sarah Sharpe9df17e2010-04-02 15:34:43 -07003336 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
Andiry Xue1eab2e2011-01-04 16:30:39 -08003337 start_cycle, start_trb);
Sarah Sharpb10de142009-04-27 19:58:50 -07003338 return 0;
3339}
3340
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003341/* Caller must have locked xhci->lock */
Sarah Sharp23e3be12009-04-29 19:05:20 -07003342int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003343 struct urb *urb, int slot_id, unsigned int ep_index)
3344{
3345 struct xhci_ring *ep_ring;
3346 int num_trbs;
3347 int ret;
3348 struct usb_ctrlrequest *setup;
3349 struct xhci_generic_trb *start_trb;
3350 int start_cycle;
Lu Baolufb79a6d2017-01-23 14:20:01 +02003351 u32 field;
Andiry Xu8e51adc2010-07-22 15:23:31 -07003352 struct urb_priv *urb_priv;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003353 struct xhci_td *td;
3354
Sarah Sharpe9df17e2010-04-02 15:34:43 -07003355 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3356 if (!ep_ring)
3357 return -EINVAL;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003358
3359 /*
3360 * Need to copy setup packet into setup TRB, so we can't use the setup
3361 * DMA address.
3362 */
3363 if (!urb->setup_packet)
3364 return -EINVAL;
3365
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003366 /* 1 TRB for setup, 1 for status */
3367 num_trbs = 2;
3368 /*
3369 * Don't need to check if we need additional event data and normal TRBs,
3370	 * since data in control transfers will never get bigger than 16MB.
3371 * XXX: can we get a buffer that crosses 64KB boundaries?
3372 */
3373 if (urb->transfer_buffer_length > 0)
3374 num_trbs++;
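	/*
	 * Worked example (annotation, not from the original source): a
	 * standard GET_DESCRIPTOR request with wLength = 18 therefore needs
	 * three TRBs: one setup TRB, one data stage TRB, and one status
	 * stage TRB.
	 */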
Sarah Sharpe9df17e2010-04-02 15:34:43 -07003375 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3376 ep_index, urb->stream_id,
Andiry Xu3b72fca2012-03-05 17:49:32 +08003377 num_trbs, urb, 0, mem_flags);
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003378 if (ret < 0)
3379 return ret;
3380
Andiry Xu8e51adc2010-07-22 15:23:31 -07003381 urb_priv = urb->hcpriv;
Mathias Nyman7e64b032017-01-23 14:20:26 +02003382 td = &urb_priv->td[0];
Andiry Xu8e51adc2010-07-22 15:23:31 -07003383
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003384 /*
3385 * Don't give the first TRB to the hardware (by toggling the cycle bit)
3386 * until we've finished creating all the other TRBs. The ring's cycle
3387 * state may change as we enqueue the other TRBs, so save it too.
3388 */
3389 start_trb = &ep_ring->enqueue->generic;
3390 start_cycle = ep_ring->cycle_state;
3391
3392 /* Queue setup TRB - see section 6.4.1.2.1 */
3393 /* FIXME better way to translate setup_packet into two u32 fields? */
3394 setup = (struct usb_ctrlrequest *) urb->setup_packet;
Andiry Xu50f7b522010-12-20 15:09:34 +08003395 field = 0;
3396 field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
3397 if (start_cycle == 0)
3398 field |= 0x1;
Andiry Xub83cdc82011-05-05 18:13:56 +08003399
Mathias Nymandca77942015-09-21 17:46:16 +03003400 /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
Chunfeng Yun0cbd4b32015-11-24 13:09:55 +02003401 if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) {
Andiry Xub83cdc82011-05-05 18:13:56 +08003402 if (urb->transfer_buffer_length > 0) {
3403 if (setup->bRequestType & USB_DIR_IN)
3404 field |= TRB_TX_TYPE(TRB_DATA_IN);
3405 else
3406 field |= TRB_TX_TYPE(TRB_DATA_OUT);
3407 }
3408 }
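	/*
	 * Hedged annotation (not from the original source): continuing the
	 * GET_DESCRIPTOR example, its bRequestType of 0x80 has USB_DIR_IN
	 * set, so the setup TRB carries TRB_TX_TYPE(TRB_DATA_IN), telling a
	 * 1.0+ controller which direction the data stage will move.
	 */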
3409
Andiry Xu3b72fca2012-03-05 17:49:32 +08003410 queue_trb(xhci, ep_ring, true,
Matt Evans28ccd292011-03-29 13:40:46 +11003411 setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
3412 le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
3413 TRB_LEN(8) | TRB_INTR_TARGET(0),
3414 /* Immediate data in pointer */
3415 field);
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003416
3417 /* If there's data, queue data TRBs */
Sarah Sharpaf8b9e62011-03-23 16:26:26 -07003418 /* Only set interrupt on short packet for IN endpoints */
3419 if (usb_urb_dir_in(urb))
3420 field = TRB_ISP | TRB_TYPE(TRB_DATA);
3421 else
3422 field = TRB_TYPE(TRB_DATA);
3423
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003424 if (urb->transfer_buffer_length > 0) {
Lu Baolufb79a6d2017-01-23 14:20:01 +02003425 u32 length_field, remainder;
3426
3427 remainder = xhci_td_remainder(xhci, 0,
3428 urb->transfer_buffer_length,
3429 urb->transfer_buffer_length,
3430 urb, 1);
3431 length_field = TRB_LEN(urb->transfer_buffer_length) |
3432 TRB_TD_SIZE(remainder) |
3433 TRB_INTR_TARGET(0);
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003434 if (setup->bRequestType & USB_DIR_IN)
3435 field |= TRB_DIR_IN;
Andiry Xu3b72fca2012-03-05 17:49:32 +08003436 queue_trb(xhci, ep_ring, true,
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003437 lower_32_bits(urb->transfer_dma),
3438 upper_32_bits(urb->transfer_dma),
Sarah Sharpf9dc68f2009-07-27 12:03:07 -07003439 length_field,
Sarah Sharpaf8b9e62011-03-23 16:26:26 -07003440 field | ep_ring->cycle_state);
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003441 }
3442
3443 /* Save the DMA address of the last TRB in the TD */
3444 td->last_trb = ep_ring->enqueue;
3445
3446 /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
3447 /* If the device sent data, the status stage is an OUT transfer */
3448 if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
3449 field = 0;
3450 else
3451 field = TRB_DIR_IN;
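	/*
	 * Worked example (annotation, not from the original source): for a
	 * control-IN transfer with a data stage (e.g. GET_DESCRIPTOR) the
	 * status stage is a zero-length OUT, so field stays 0; for
	 * control-OUT or no-data requests (e.g. SET_CONFIGURATION) the
	 * status stage is IN and TRB_DIR_IN is set.
	 */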
Andiry Xu3b72fca2012-03-05 17:49:32 +08003452 queue_trb(xhci, ep_ring, false,
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003453 0,
3454 0,
3455 TRB_INTR_TARGET(0),
3456 /* Event on completion */
3457 field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
3458
Sarah Sharpe9df17e2010-04-02 15:34:43 -07003459 giveback_first_trb(xhci, slot_id, ep_index, 0,
Andiry Xue1eab2e2011-01-04 16:30:39 -08003460 start_cycle, start_trb);
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003461 return 0;
3462}
3463
Sarah Sharp5cd43e32011-04-08 09:37:29 -07003464/*
3465 * The transfer burst count field of the isochronous TRB defines the number of
3466 * bursts that are required to move all packets in this TD. Only SuperSpeed
3467 * devices can burst up to bMaxBurst number of packets per service interval.
3468 * This field is zero based, meaning a value of zero in the field means one
3469 * burst. Basically, for everything but SuperSpeed devices, this field will be
3470 * zero. Only xHCI 1.0 host controllers support this field.
3471 */
3472static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
Sarah Sharp5cd43e32011-04-08 09:37:29 -07003473 struct urb *urb, unsigned int total_packet_count)
3474{
3475 unsigned int max_burst;
3476
Mathias Nyman09c352e2016-02-12 16:40:17 +02003477 if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER)
Sarah Sharp5cd43e32011-04-08 09:37:29 -07003478 return 0;
3479
3480 max_burst = urb->ep->ss_ep_comp.bMaxBurst;
Mathias Nyman3213b152014-06-24 17:14:41 +03003481 return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
Sarah Sharp5cd43e32011-04-08 09:37:29 -07003482}
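/*
 * Worked example (annotation, not part of the driver): for a SuperSpeed
 * isoc endpoint with bMaxBurst = 2 (three packets per burst) and a TD of
 * 7 packets, DIV_ROUND_UP(7, 3) = 3 bursts are needed, so the zero-based
 * burst count field is 2.
 */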
3483
Sarah Sharpb61d3782011-04-19 17:43:33 -07003484/*
3485 * Returns the number of packets in the last "burst" of packets. This field is
3486 * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so
3487 * the last burst packet count is equal to the total number of packets in the
3488 * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst
3489 * must contain (bMaxBurst + 1) number of packets, but the last burst can
3490 * contain 1 to (bMaxBurst + 1) packets.
3491 */
3492static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
Sarah Sharpb61d3782011-04-19 17:43:33 -07003493 struct urb *urb, unsigned int total_packet_count)
3494{
3495 unsigned int max_burst;
3496 unsigned int residue;
3497
3498 if (xhci->hci_version < 0x100)
3499 return 0;
3500
Mathias Nyman09c352e2016-02-12 16:40:17 +02003501 if (urb->dev->speed >= USB_SPEED_SUPER) {
Sarah Sharpb61d3782011-04-19 17:43:33 -07003502 /* bMaxBurst is zero based: 0 means 1 packet per burst */
3503 max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3504 residue = total_packet_count % (max_burst + 1);
3505 /* If residue is zero, the last burst contains (max_burst + 1)
3506 * number of packets, but the TLBPC field is zero-based.
3507 */
3508 if (residue == 0)
3509 return max_burst;
3510 return residue - 1;
Sarah Sharpb61d3782011-04-19 17:43:33 -07003511 }
Mathias Nyman09c352e2016-02-12 16:40:17 +02003512 if (total_packet_count == 0)
3513 return 0;
3514 return total_packet_count - 1;
Sarah Sharpb61d3782011-04-19 17:43:33 -07003515}
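/*
 * Worked example (annotation, not part of the driver): continuing the
 * case above with 7 packets and bMaxBurst = 2, residue = 7 % 3 = 1, so
 * the last burst carries one packet and the zero-based TLBPC field is 0.
 * With 6 packets, residue would be 0 and the field would be max_burst = 2,
 * i.e. a full (bMaxBurst + 1)-packet last burst.
 */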
3516
Lu Baolu79b80942015-08-06 19:24:00 +03003517/*
3518 * Calculates the Frame ID field of the isochronous TRB, which identifies
3519 * the target frame that the interval associated with this Isochronous
3520 * Transfer Descriptor will start on. Refer to 4.11.2.5 in the xHCI 1.1 spec.
3521 *
3522 * Returns actual frame id on success, negative value on error.
3523 */
3524static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
3525 struct urb *urb, int index)
3526{
3527 int start_frame, ist, ret = 0;
3528 int start_frame_id, end_frame_id, current_frame_id;
3529
3530 if (urb->dev->speed == USB_SPEED_LOW ||
3531 urb->dev->speed == USB_SPEED_FULL)
3532 start_frame = urb->start_frame + index * urb->interval;
3533 else
3534 start_frame = (urb->start_frame + index * urb->interval) >> 3;
3535
3536 /* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
3537 *
3538 * If bit [3] of IST is cleared to '0', software can add a TRB no
3539 * later than IST[2:0] Microframes before that TRB is scheduled to
3540 * be executed.
3541 * If bit [3] of IST is set to '1', software can add a TRB no later
3542 * than IST[2:0] Frames before that TRB is scheduled to be executed.
3543 */
3544 ist = HCS_IST(xhci->hcs_params2) & 0x7;
3545 if (HCS_IST(xhci->hcs_params2) & (1 << 3))
3546 ist <<= 3;
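	/*
	 * Worked example (annotation, not from the original source): an
	 * HCSPARAMS2 IST field of 0b1010 has bit [3] set and IST[2:0] = 2,
	 * so the threshold is two Frames; the code above converts it to
	 * 2 << 3 = 16 microframes.
	 */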
3547
3548 /* Software shall not schedule an Isoch TD with a Frame ID value that
3549 * is less than the Start Frame ID or greater than the End Frame ID,
3550 * where:
3551 *
3552 * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
3553 * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
3554 *
3555 * Both the End Frame ID and Start Frame ID values are calculated
3556	 * in microframes. When software determines the valid Frame ID value,
3557	 * the End Frame ID value should be rounded down to the nearest Frame
3558 * boundary, and the Start Frame ID value should be rounded up to the
3559 * nearest Frame boundary.
3560 */
3561 current_frame_id = readl(&xhci->run_regs->microframe_index);
3562 start_frame_id = roundup(current_frame_id + ist + 1, 8);
3563 end_frame_id = rounddown(current_frame_id + 895 * 8, 8);
3564
3565 start_frame &= 0x7ff;
3566 start_frame_id = (start_frame_id >> 3) & 0x7ff;
3567 end_frame_id = (end_frame_id >> 3) & 0x7ff;
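	/*
	 * Worked example (hedged annotation, not from the original source):
	 * with MFINDEX = 16000 and ist = 16, start_frame_id =
	 * roundup(16017, 8) >> 3 = 2003 and end_frame_id =
	 * (rounddown(16000 + 7160, 8) >> 3) & 0x7ff = 847. Since the window
	 * wraps (start > end), a Frame ID is valid only in 2003..2047 or
	 * 0..847, which the checks below implement.
	 */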
3568
3569 xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
3570 __func__, index, readl(&xhci->run_regs->microframe_index),
3571 start_frame_id, end_frame_id, start_frame);
3572
3573 if (start_frame_id < end_frame_id) {
3574 if (start_frame > end_frame_id ||
3575 start_frame < start_frame_id)
3576 ret = -EINVAL;
3577 } else if (start_frame_id > end_frame_id) {
3578 if ((start_frame > end_frame_id &&
3579 start_frame < start_frame_id))
3580 ret = -EINVAL;
3581 } else {
3582 ret = -EINVAL;
3583 }
3584
3585 if (index == 0) {
3586 if (ret == -EINVAL || start_frame == start_frame_id) {
3587 start_frame = start_frame_id + 1;
3588 if (urb->dev->speed == USB_SPEED_LOW ||
3589 urb->dev->speed == USB_SPEED_FULL)
3590 urb->start_frame = start_frame;
3591 else
3592 urb->start_frame = start_frame << 3;
3593 ret = 0;
3594 }
3595 }
3596
3597 if (ret) {
3598 xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
3599 start_frame, current_frame_id, index,
3600 start_frame_id, end_frame_id);
3601 xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
3602 return ret;
3603 }
3604
3605 return start_frame;
3606}
3607
Andiry Xu04e51902010-07-22 15:23:39 -07003608/* This is for isoc transfer */
3609static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3610 struct urb *urb, int slot_id, unsigned int ep_index)
3611{
3612 struct xhci_ring *ep_ring;
3613 struct urb_priv *urb_priv;
3614 struct xhci_td *td;
3615 int num_tds, trbs_per_td;
3616 struct xhci_generic_trb *start_trb;
3617 bool first_trb;
3618 int start_cycle;
3619 u32 field, length_field;
3620 int running_total, trb_buff_len, td_len, td_remain_len, ret;
3621 u64 start_addr, addr;
3622 int i, j;
Andiry Xu47cbf692010-12-20 14:49:48 +08003623 bool more_trbs_coming;
Lu Baolu79b80942015-08-06 19:24:00 +03003624 struct xhci_virt_ep *xep;
Mathias Nyman09c352e2016-02-12 16:40:17 +02003625 int frame_id;
Andiry Xu04e51902010-07-22 15:23:39 -07003626
Lu Baolu79b80942015-08-06 19:24:00 +03003627 xep = &xhci->devs[slot_id]->eps[ep_index];
Andiry Xu04e51902010-07-22 15:23:39 -07003628 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
3629
3630 num_tds = urb->number_of_packets;
3631 if (num_tds < 1) {
3632 xhci_dbg(xhci, "Isoc URB with zero packets?\n");
3633 return -EINVAL;
3634 }
Andiry Xu04e51902010-07-22 15:23:39 -07003635 start_addr = (u64) urb->transfer_dma;
3636 start_trb = &ep_ring->enqueue->generic;
3637 start_cycle = ep_ring->cycle_state;
3638
Sarah Sharp522989a2011-07-29 12:44:32 -07003639 urb_priv = urb->hcpriv;
Mathias Nyman09c352e2016-02-12 16:40:17 +02003640 /* Queue the TRBs for each TD, even if they are zero-length */
Andiry Xu04e51902010-07-22 15:23:39 -07003641 for (i = 0; i < num_tds; i++) {
Mathias Nyman09c352e2016-02-12 16:40:17 +02003642 unsigned int total_pkt_count, max_pkt;
3643 unsigned int burst_count, last_burst_pkt_count;
3644 u32 sia_frame_id;
Andiry Xu04e51902010-07-22 15:23:39 -07003645
Sarah Sharp4da6e6f2011-04-01 14:01:30 -07003646 first_trb = true;
Andiry Xu04e51902010-07-22 15:23:39 -07003647 running_total = 0;
3648 addr = start_addr + urb->iso_frame_desc[i].offset;
3649 td_len = urb->iso_frame_desc[i].length;
3650 td_remain_len = td_len;
Felipe Balbi734d3dd2016-09-28 13:46:37 +03003651 max_pkt = usb_endpoint_maxp(&urb->ep->desc);
Mathias Nyman09c352e2016-02-12 16:40:17 +02003652 total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);
3653
Sarah Sharp48df4a62011-08-12 10:23:01 -07003654 /* A zero-length transfer still involves at least one packet. */
Mathias Nyman09c352e2016-02-12 16:40:17 +02003655 if (total_pkt_count == 0)
3656 total_pkt_count++;
3657 burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count);
3658 last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci,
3659 urb, total_pkt_count);
Andiry Xu04e51902010-07-22 15:23:39 -07003660
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003661 trbs_per_td = count_isoc_trbs_needed(urb, i);
Andiry Xu04e51902010-07-22 15:23:39 -07003662
3663 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
Andiry Xu3b72fca2012-03-05 17:49:32 +08003664 urb->stream_id, trbs_per_td, urb, i, mem_flags);
Sarah Sharp522989a2011-07-29 12:44:32 -07003665 if (ret < 0) {
3666 if (i == 0)
3667 return ret;
3668 goto cleanup;
3669 }
Mathias Nyman7e64b032017-01-23 14:20:26 +02003670 td = &urb_priv->td[i];
Mathias Nyman09c352e2016-02-12 16:40:17 +02003671
3672 /* use SIA as default, if frame id is used overwrite it */
3673 sia_frame_id = TRB_SIA;
3674 if (!(urb->transfer_flags & URB_ISO_ASAP) &&
3675 HCC_CFC(xhci->hcc_params)) {
3676 frame_id = xhci_get_isoc_frame_id(xhci, urb, i);
3677 if (frame_id >= 0)
3678 sia_frame_id = TRB_FRAME_ID(frame_id);
3679 }
3680 /*
3681 * Set isoc specific data for the first TRB in a TD.
3682 * Prevent HW from getting the TRBs by keeping the cycle state
3683		 * inverted in the first TD's isoc TRB.
3684 */
Mathias Nyman2f6d3b62016-02-12 16:40:18 +02003685 field = TRB_TYPE(TRB_ISOC) |
Mathias Nyman09c352e2016-02-12 16:40:17 +02003686 TRB_TLBPC(last_burst_pkt_count) |
3687 sia_frame_id |
3688 (i ? ep_ring->cycle_state : !start_cycle);
3689
Mathias Nyman2f6d3b62016-02-12 16:40:18 +02003690 /* xhci 1.1 with ETE uses TD_Size field for TBC, old is Rsvdz */
3691 if (!xep->use_extended_tbc)
3692 field |= TRB_TBC(burst_count);
3693
Mathias Nyman09c352e2016-02-12 16:40:17 +02003694 /* fill the rest of the TRB fields, and remaining normal TRBs */
Andiry Xu04e51902010-07-22 15:23:39 -07003695 for (j = 0; j < trbs_per_td; j++) {
3696 u32 remainder = 0;
Andiry Xu04e51902010-07-22 15:23:39 -07003697
Mathias Nyman09c352e2016-02-12 16:40:17 +02003698 /* only first TRB is isoc, overwrite otherwise */
3699 if (!first_trb)
3700 field = TRB_TYPE(TRB_NORMAL) |
3701 ep_ring->cycle_state;
Andiry Xu04e51902010-07-22 15:23:39 -07003702
Sarah Sharpaf8b9e62011-03-23 16:26:26 -07003703 /* Only set interrupt on short packet for IN EPs */
3704 if (usb_urb_dir_in(urb))
3705 field |= TRB_ISP;
3706
Mathias Nyman09c352e2016-02-12 16:40:17 +02003707 /* Set the chain bit for all except the last TRB */
Andiry Xu04e51902010-07-22 15:23:39 -07003708 if (j < trbs_per_td - 1) {
Andiry Xu47cbf692010-12-20 14:49:48 +08003709 more_trbs_coming = true;
Mathias Nyman09c352e2016-02-12 16:40:17 +02003710 field |= TRB_CHAIN;
Andiry Xu04e51902010-07-22 15:23:39 -07003711 } else {
Mathias Nyman09c352e2016-02-12 16:40:17 +02003712 more_trbs_coming = false;
Andiry Xu04e51902010-07-22 15:23:39 -07003713 td->last_trb = ep_ring->enqueue;
3714 field |= TRB_IOC;
Mathias Nyman09c352e2016-02-12 16:40:17 +02003715 /* set BEI, except for the last TD */
3716 if (xhci->hci_version >= 0x100 &&
3717 !(xhci->quirks & XHCI_AVOID_BEI) &&
3718 i < num_tds - 1)
3719 field |= TRB_BEI;
Andiry Xu04e51902010-07-22 15:23:39 -07003720 }
Andiry Xu04e51902010-07-22 15:23:39 -07003721 /* Calculate TRB length */
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003722 trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
Andiry Xu04e51902010-07-22 15:23:39 -07003723 if (trb_buff_len > td_remain_len)
3724 trb_buff_len = td_remain_len;
3725
Sarah Sharp4da6e6f2011-04-01 14:01:30 -07003726 /* Set the TRB length, TD size, & interrupter fields. */
Mathias Nymanc840d6c2015-10-09 13:30:08 +03003727 remainder = xhci_td_remainder(xhci, running_total,
3728 trb_buff_len, td_len,
Mathias Nyman124c3932016-06-21 10:57:59 +03003729 urb, more_trbs_coming);
Mathias Nymanc840d6c2015-10-09 13:30:08 +03003730
Andiry Xu04e51902010-07-22 15:23:39 -07003731 length_field = TRB_LEN(trb_buff_len) |
Andiry Xu04e51902010-07-22 15:23:39 -07003732 TRB_INTR_TARGET(0);
Sarah Sharp4da6e6f2011-04-01 14:01:30 -07003733
Mathias Nyman2f6d3b62016-02-12 16:40:18 +02003734 /* xhci 1.1 with ETE uses TD Size field for TBC */
3735 if (first_trb && xep->use_extended_tbc)
3736 length_field |= TRB_TD_SIZE_TBC(burst_count);
3737 else
3738 length_field |= TRB_TD_SIZE(remainder);
3739 first_trb = false;
3740
Andiry Xu3b72fca2012-03-05 17:49:32 +08003741 queue_trb(xhci, ep_ring, more_trbs_coming,
Andiry Xu04e51902010-07-22 15:23:39 -07003742 lower_32_bits(addr),
3743 upper_32_bits(addr),
3744 length_field,
Sarah Sharpaf8b9e62011-03-23 16:26:26 -07003745 field);
Andiry Xu04e51902010-07-22 15:23:39 -07003746 running_total += trb_buff_len;
3747
3748 addr += trb_buff_len;
3749 td_remain_len -= trb_buff_len;
3750 }
3751
3752 /* Check TD length */
3753 if (running_total != td_len) {
3754			xhci_err(xhci, "ISOC TD length mismatch\n");
Andiry Xucf840552012-01-18 17:47:12 +08003755 ret = -EINVAL;
3756 goto cleanup;
Andiry Xu04e51902010-07-22 15:23:39 -07003757 }
3758 }
3759
Lu Baolu79b80942015-08-06 19:24:00 +03003760 /* store the next frame id */
3761 if (HCC_CFC(xhci->hcc_params))
3762 xep->next_frame_id = urb->start_frame + num_tds * urb->interval;
3763
Andiry Xuc41136b2011-03-22 17:08:14 +08003764 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
3765 if (xhci->quirks & XHCI_AMD_PLL_FIX)
3766 usb_amd_quirk_pll_disable();
3767 }
3768 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
3769
Andiry Xue1eab2e2011-01-04 16:30:39 -08003770 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3771 start_cycle, start_trb);
Andiry Xu04e51902010-07-22 15:23:39 -07003772 return 0;
Sarah Sharp522989a2011-07-29 12:44:32 -07003773cleanup:
3774 /* Clean up a partially enqueued isoc transfer. */
3775
3776 for (i--; i >= 0; i--)
Mathias Nyman7e64b032017-01-23 14:20:26 +02003777 list_del_init(&urb_priv->td[i].td_list);
Sarah Sharp522989a2011-07-29 12:44:32 -07003778
3779 /* Use the first TD as a temporary variable to turn the TDs we've queued
3780 * into No-ops with a software-owned cycle bit. That way the hardware
3781 * won't accidentally start executing bogus TDs when we partially
3782 * overwrite them. td->first_trb and td->start_seg are already set.
3783 */
Mathias Nyman7e64b032017-01-23 14:20:26 +02003784 urb_priv->td[0].last_trb = ep_ring->enqueue;
Sarah Sharp522989a2011-07-29 12:44:32 -07003785 /* Every TRB except the first & last will have its cycle bit flipped. */
Mathias Nyman7e64b032017-01-23 14:20:26 +02003786 td_to_noop(xhci, ep_ring, &urb_priv->td[0], true);
Sarah Sharp522989a2011-07-29 12:44:32 -07003787
3788 /* Reset the ring enqueue back to the first TRB and its cycle bit. */
Mathias Nyman7e64b032017-01-23 14:20:26 +02003789 ep_ring->enqueue = urb_priv->td[0].first_trb;
3790 ep_ring->enq_seg = urb_priv->td[0].start_seg;
Sarah Sharp522989a2011-07-29 12:44:32 -07003791 ep_ring->cycle_state = start_cycle;
Andiry Xub008df62012-03-05 17:49:34 +08003792 ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
Sarah Sharp522989a2011-07-29 12:44:32 -07003793 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
3794 return ret;
Andiry Xu04e51902010-07-22 15:23:39 -07003795}
3796
3797/*
3798 * Check transfer ring to guarantee there is enough room for the urb.
3799 * Update ISO URB start_frame and interval.
Lu Baolu79b80942015-08-06 19:24:00 +03003800 * Update interval as xhci_queue_intr_tx does. Use xhci frame_index to
3801 * update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or
3802 * Contiguous Frame ID is not supported by HC.
Andiry Xu04e51902010-07-22 15:23:39 -07003803 */
3804int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
3805 struct urb *urb, int slot_id, unsigned int ep_index)
3806{
3807 struct xhci_virt_device *xdev;
3808 struct xhci_ring *ep_ring;
3809 struct xhci_ep_ctx *ep_ctx;
3810 int start_frame;
Andiry Xu04e51902010-07-22 15:23:39 -07003811 int num_tds, num_trbs, i;
3812 int ret;
Lu Baolu79b80942015-08-06 19:24:00 +03003813 struct xhci_virt_ep *xep;
3814 int ist;
Andiry Xu04e51902010-07-22 15:23:39 -07003815
3816 xdev = xhci->devs[slot_id];
Lu Baolu79b80942015-08-06 19:24:00 +03003817 xep = &xhci->devs[slot_id]->eps[ep_index];
Andiry Xu04e51902010-07-22 15:23:39 -07003818 ep_ring = xdev->eps[ep_index].ring;
3819 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
3820
3821 num_trbs = 0;
3822 num_tds = urb->number_of_packets;
3823 for (i = 0; i < num_tds; i++)
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003824 num_trbs += count_isoc_trbs_needed(urb, i);
Andiry Xu04e51902010-07-22 15:23:39 -07003825
3826 /* Check the ring to guarantee there is enough room for the whole urb.
3827	 * Do not insert any TD of the URB into the ring if the check fails.
3828 */
Mathias Nyman5071e6b2016-11-11 15:13:28 +02003829 ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
Andiry Xu3b72fca2012-03-05 17:49:32 +08003830 num_trbs, mem_flags);
Andiry Xu04e51902010-07-22 15:23:39 -07003831 if (ret)
3832 return ret;
3833
Lu Baolu79b80942015-08-06 19:24:00 +03003834 /*
3835 * Check interval value. This should be done before we start to
3836 * calculate the start frame value.
3837 */
Alexandr Ivanov78140152016-04-22 13:17:11 +03003838 check_interval(xhci, urb, ep_ctx);
Lu Baolu79b80942015-08-06 19:24:00 +03003839
3840 /* Calculate the start frame and put it in urb->start_frame. */
Lu Baolu42df7212015-11-18 10:48:21 +02003841 if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
Mathias Nyman5071e6b2016-11-11 15:13:28 +02003842 if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING) {
Lu Baolu42df7212015-11-18 10:48:21 +02003843 urb->start_frame = xep->next_frame_id;
3844 goto skip_start_over;
3845 }
Lu Baolu79b80942015-08-06 19:24:00 +03003846 }
3847
3848 start_frame = readl(&xhci->run_regs->microframe_index);
3849 start_frame &= 0x3fff;
3850 /*
3851	 * Round up to the next frame and consider the time before the TRB
3852	 * really gets scheduled by hardware.
3853 */
3854 ist = HCS_IST(xhci->hcs_params2) & 0x7;
3855 if (HCS_IST(xhci->hcs_params2) & (1 << 3))
3856 ist <<= 3;
3857 start_frame += ist + XHCI_CFC_DELAY;
3858 start_frame = roundup(start_frame, 8);
3859
3860 /*
3861 * Round up to the next ESIT (Endpoint Service Interval Time) if ESIT
3862	 * is greater than 8 microframes.
3863 */
3864 if (urb->dev->speed == USB_SPEED_LOW ||
3865 urb->dev->speed == USB_SPEED_FULL) {
3866 start_frame = roundup(start_frame, urb->interval << 3);
3867 urb->start_frame = start_frame >> 3;
3868 } else {
3869 start_frame = roundup(start_frame, urb->interval);
3870 urb->start_frame = start_frame;
3871 }
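	/*
	 * Worked example (hedged annotation, assuming XHCI_CFC_DELAY is a
	 * small microframe margin): with MFINDEX & 0x3fff = 1000 and
	 * ist = 16, the earliest candidate is 1016 + XHCI_CFC_DELAY rounded
	 * up to a frame boundary; it is then aligned to the endpoint
	 * interval, and urb->start_frame is kept in microframes for high-
	 * and super-speed devices but converted to frames (>> 3) for low-
	 * and full-speed ones.
	 */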
3872
3873skip_start_over:
Andiry Xub008df62012-03-05 17:49:34 +08003874 ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
3875
Dan Carpenter3fc82062012-03-28 10:30:26 +03003876 return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
Andiry Xu04e51902010-07-22 15:23:39 -07003877}
3878
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003879/**** Command Ring Operations ****/
3880
Sarah Sharp913a8a32009-09-04 10:53:13 -07003881/* Generic function for queueing a command TRB on the command ring.
3882 * Check to make sure there's room on the command ring for one command TRB.
3883 * Also check that there's room reserved for commands that must not fail.
3884 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
3885 * then only check for the number of reserved spots.
3886 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
3887 * because the command event handler may want to resubmit a failed command.
3888 */
Mathias Nymanddba5cd2014-05-08 19:26:00 +03003889static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
3890 u32 field1, u32 field2,
3891 u32 field3, u32 field4, bool command_must_succeed)
Sarah Sharp7f84eef2009-04-27 19:53:56 -07003892{
Sarah Sharp913a8a32009-09-04 10:53:13 -07003893 int reserved_trbs = xhci->cmd_ring_reserved_trbs;
Sarah Sharpd1dc9082010-07-09 17:08:38 +02003894 int ret;
Roger Quadrosad6b1d92015-05-29 17:01:49 +03003895
Mathias Nyman98d74f92016-04-08 16:25:10 +03003896 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
3897 (xhci->xhc_state & XHCI_STATE_HALTED)) {
Roger Quadrosad6b1d92015-05-29 17:01:49 +03003898 xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
Mathias Nymanc9aa1a22014-05-08 19:26:01 +03003899 return -ESHUTDOWN;
Roger Quadrosad6b1d92015-05-29 17:01:49 +03003900 }
Sarah Sharpd1dc9082010-07-09 17:08:38 +02003901
Sarah Sharp913a8a32009-09-04 10:53:13 -07003902 if (!command_must_succeed)
3903 reserved_trbs++;
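	/*
	 * Worked example (annotation, not from the original source): if
	 * cmd_ring_reserved_trbs is 2, a normal command asks prepare_ring()
	 * for 3 free TRBs (its own plus the 2 reserved slots), while a
	 * must-succeed command may dip into the reserved slots and only
	 * asks for 2.
	 */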
3904
Sarah Sharpd1dc9082010-07-09 17:08:38 +02003905 ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
Andiry Xu3b72fca2012-03-05 17:49:32 +08003906 reserved_trbs, GFP_ATOMIC);
Sarah Sharpd1dc9082010-07-09 17:08:38 +02003907 if (ret < 0) {
3908 xhci_err(xhci, "ERR: No room for command on command ring\n");
Sarah Sharp913a8a32009-09-04 10:53:13 -07003909 if (command_must_succeed)
3910 xhci_err(xhci, "ERR: Reserved TRB counting for "
3911 "unfailable commands failed.\n");
Sarah Sharpd1dc9082010-07-09 17:08:38 +02003912 return ret;
Sarah Sharp7f84eef2009-04-27 19:53:56 -07003913 }
Mathias Nymanc9aa1a22014-05-08 19:26:01 +03003914
3915 cmd->command_trb = xhci->cmd_ring->enqueue;
Mathias Nymanddba5cd2014-05-08 19:26:00 +03003916
Mathias Nymanc311e392014-05-08 19:26:03 +03003917 /* if there are no other commands queued we start the timeout timer */
Lu Baoludaa47f22017-01-23 14:20:02 +02003918 if (list_empty(&xhci->cmd_list)) {
Mathias Nymanc311e392014-05-08 19:26:03 +03003919 xhci->current_cmd = cmd;
OGAWA Hirofumicb4d5ce2017-01-03 18:28:50 +02003920 xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
Mathias Nymanc311e392014-05-08 19:26:03 +03003921 }
3922
Lu Baoludaa47f22017-01-23 14:20:02 +02003923 list_add_tail(&cmd->cmd_list, &xhci->cmd_list);
3924
Andiry Xu3b72fca2012-03-05 17:49:32 +08003925 queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
3926 field4 | xhci->cmd_ring->cycle_state);
Sarah Sharp7f84eef2009-04-27 19:53:56 -07003927 return 0;
3928}
3929
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003930/* Queue a slot enable or disable request on the command ring */
Mathias Nymanddba5cd2014-05-08 19:26:00 +03003931int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
3932 u32 trb_type, u32 slot_id)
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003933{
Mathias Nymanddba5cd2014-05-08 19:26:00 +03003934 return queue_command(xhci, cmd, 0, 0, 0,
Sarah Sharp913a8a32009-09-04 10:53:13 -07003935 TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003936}
3937
3938/* Queue an address device command TRB */
Mathias Nymanddba5cd2014-05-08 19:26:00 +03003939int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
3940 dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003941{
Mathias Nymanddba5cd2014-05-08 19:26:00 +03003942 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
Sarah Sharp8e595a52009-07-27 12:03:31 -07003943 upper_32_bits(in_ctx_ptr), 0,
Dan Williams48fc7db2013-12-05 17:07:27 -08003944 TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
3945 | (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003946}
Sarah Sharpf94e01862009-04-27 19:58:38 -07003947
Mathias Nymanddba5cd2014-05-08 19:26:00 +03003948int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
Sarah Sharp02386342010-05-24 13:25:28 -07003949 u32 field1, u32 field2, u32 field3, u32 field4)
3950{
Mathias Nymanddba5cd2014-05-08 19:26:00 +03003951 return queue_command(xhci, cmd, field1, field2, field3, field4, false);
Sarah Sharp02386342010-05-24 13:25:28 -07003952}
3953
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08003954/* Queue a reset device command TRB */
Mathias Nymanddba5cd2014-05-08 19:26:00 +03003955int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
3956 u32 slot_id)
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08003957{
Mathias Nymanddba5cd2014-05-08 19:26:00 +03003958 return queue_command(xhci, cmd, 0, 0, 0,
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08003959 TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
3960 false);
3961}
3962
Sarah Sharpf94e01862009-04-27 19:58:38 -07003963/* Queue a configure endpoint command TRB */
Mathias Nymanddba5cd2014-05-08 19:26:00 +03003964int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
3965 struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
Sarah Sharp913a8a32009-09-04 10:53:13 -07003966 u32 slot_id, bool command_must_succeed)
Sarah Sharpf94e01862009-04-27 19:58:38 -07003967{
Mathias Nymanddba5cd2014-05-08 19:26:00 +03003968 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
Sarah Sharp8e595a52009-07-27 12:03:31 -07003969 upper_32_bits(in_ctx_ptr), 0,
Sarah Sharp913a8a32009-09-04 10:53:13 -07003970 TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
3971 command_must_succeed);
Sarah Sharpf94e01862009-04-27 19:58:38 -07003972}
Sarah Sharpae636742009-04-29 19:02:31 -07003973
Sarah Sharpf2217e82009-08-07 14:04:43 -07003974/* Queue an evaluate context command TRB */
Mathias Nymanddba5cd2014-05-08 19:26:00 +03003975int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
3976 dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
Sarah Sharpf2217e82009-08-07 14:04:43 -07003977{
Mathias Nymanddba5cd2014-05-08 19:26:00 +03003978 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
Sarah Sharpf2217e82009-08-07 14:04:43 -07003979 upper_32_bits(in_ctx_ptr), 0,
Sarah Sharp913a8a32009-09-04 10:53:13 -07003980 TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
Sarah Sharp4b266542012-05-07 15:34:26 -07003981 command_must_succeed);
Sarah Sharpf2217e82009-08-07 14:04:43 -07003982}
3983
Andiry Xube88fe42010-10-14 07:22:57 -07003984/*
3985 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
3986 * activity on an endpoint that is about to be suspended.
3987 */
Mathias Nymanddba5cd2014-05-08 19:26:00 +03003988int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
3989 int slot_id, unsigned int ep_index, int suspend)
Sarah Sharpae636742009-04-29 19:02:31 -07003990{
3991 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
3992 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
3993 u32 type = TRB_TYPE(TRB_STOP_RING);
Andiry Xube88fe42010-10-14 07:22:57 -07003994 u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
Sarah Sharpae636742009-04-29 19:02:31 -07003995
Mathias Nymanddba5cd2014-05-08 19:26:00 +03003996 return queue_command(xhci, cmd, 0, 0, 0,
Andiry Xube88fe42010-10-14 07:22:57 -07003997 trb_slot_id | trb_ep_index | type | trb_suspend, false);
Sarah Sharpae636742009-04-29 19:02:31 -07003998}
3999
Hans de Goeded3a43e62014-08-20 16:41:53 +03004000/* Set Transfer Ring Dequeue Pointer command */
4001void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
4002 unsigned int slot_id, unsigned int ep_index,
Hans de Goeded3a43e62014-08-20 16:41:53 +03004003 struct xhci_dequeue_state *deq_state)
Sarah Sharpae636742009-04-29 19:02:31 -07004004{
4005 dma_addr_t addr;
4006 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
4007 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
Mathias Nyman87907362017-06-02 16:36:23 +03004008 u32 trb_stream_id = STREAM_ID_FOR_TRB(deq_state->stream_id);
Hans de Goede95241db2013-10-04 00:29:48 +02004009 u32 trb_sct = 0;
Sarah Sharpae636742009-04-29 19:02:31 -07004010 u32 type = TRB_TYPE(TRB_SET_DEQ);
Sarah Sharpbf161e82011-02-23 15:46:42 -08004011 struct xhci_virt_ep *ep;
Hans de Goede1e3452e2014-08-20 16:41:52 +03004012 struct xhci_command *cmd;
4013 int ret;
Sarah Sharpae636742009-04-29 19:02:31 -07004014
Hans de Goeded3a43e62014-08-20 16:41:53 +03004015 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
4016 "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), new deq ptr = %p (0x%llx dma), new cycle = %u",
4017 deq_state->new_deq_seg,
4018 (unsigned long long)deq_state->new_deq_seg->dma,
4019 deq_state->new_deq_ptr,
4020 (unsigned long long)xhci_trb_virt_to_dma(
4021 deq_state->new_deq_seg, deq_state->new_deq_ptr),
4022 deq_state->new_cycle_state);
4023
4024 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
4025 deq_state->new_deq_ptr);
Sarah Sharpc92bcfa2009-07-27 12:05:21 -07004026 if (addr == 0) {
Sarah Sharpae636742009-04-29 19:02:31 -07004027 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -07004028 xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
Hans de Goeded3a43e62014-08-20 16:41:53 +03004029 deq_state->new_deq_seg, deq_state->new_deq_ptr);
4030 return;
Sarah Sharpc92bcfa2009-07-27 12:05:21 -07004031 }
Sarah Sharpbf161e82011-02-23 15:46:42 -08004032 ep = &xhci->devs[slot_id]->eps[ep_index];
4033 if ((ep->ep_state & SET_DEQ_PENDING)) {
4034 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
4035 xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
Hans de Goeded3a43e62014-08-20 16:41:53 +03004036 return;
Sarah Sharpbf161e82011-02-23 15:46:42 -08004037 }
Hans de Goede1e3452e2014-08-20 16:41:52 +03004038
4039 /* This function gets called from contexts where it cannot sleep */
Mathias Nyman103afda2017-12-08 17:59:08 +02004040 cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC);
Lu Baolu74e0b562017-04-07 17:57:05 +03004041 if (!cmd)
Hans de Goeded3a43e62014-08-20 16:41:53 +03004042 return;
Hans de Goede1e3452e2014-08-20 16:41:52 +03004043
Hans de Goeded3a43e62014-08-20 16:41:53 +03004044 ep->queued_deq_seg = deq_state->new_deq_seg;
4045 ep->queued_deq_ptr = deq_state->new_deq_ptr;
Mathias Nyman87907362017-06-02 16:36:23 +03004046 if (deq_state->stream_id)
Hans de Goede95241db2013-10-04 00:29:48 +02004047 trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
Hans de Goede1e3452e2014-08-20 16:41:52 +03004048 ret = queue_command(xhci, cmd,
Hans de Goeded3a43e62014-08-20 16:41:53 +03004049 lower_32_bits(addr) | trb_sct | deq_state->new_cycle_state,
4050 upper_32_bits(addr), trb_stream_id,
4051 trb_slot_id | trb_ep_index | type, false);
Hans de Goede1e3452e2014-08-20 16:41:52 +03004052 if (ret < 0) {
4053 xhci_free_command(xhci, cmd);
Hans de Goeded3a43e62014-08-20 16:41:53 +03004054 return;
Hans de Goede1e3452e2014-08-20 16:41:52 +03004055 }
4056
Hans de Goeded3a43e62014-08-20 16:41:53 +03004057 /* Stop the TD queueing code from ringing the doorbell until
4058 * this command completes. The HC won't set the dequeue pointer
4059 * if the ring is running, and ringing the doorbell starts the
4060 * ring running.
4061 */
4062 ep->ep_state |= SET_DEQ_PENDING;
Sarah Sharpae636742009-04-29 19:02:31 -07004063}
Sarah Sharpa1587d92009-07-27 12:03:15 -07004064
Mathias Nymanddba5cd2014-05-08 19:26:00 +03004065int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
Mathias Nyman21749142017-06-15 11:55:44 +03004066 int slot_id, unsigned int ep_index,
4067 enum xhci_ep_reset_type reset_type)
Sarah Sharpa1587d92009-07-27 12:03:15 -07004068{
4069 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
4070 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
4071 u32 type = TRB_TYPE(TRB_RESET_EP);
4072
Mathias Nyman21749142017-06-15 11:55:44 +03004073 if (reset_type == EP_SOFT_RESET)
4074 type |= TRB_TSP;
4075
Mathias Nymanddba5cd2014-05-08 19:26:00 +03004076 return queue_command(xhci, cmd, 0, 0, 0,
4077 trb_slot_id | trb_ep_index | type, false);
Sarah Sharpa1587d92009-07-27 12:03:15 -07004078}