/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue.  This means there will always be
 *    at least one free TRB in the ring.  This is useful if you want to turn
 *    that into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB.  If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules).  You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer.  If SW is producer, it rings the doorbell for command
 *    and endpoint rings.  If HC is the producer for the event ring,
 *    it generates an interrupt according to interrupt modulation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer.  SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer.  HC is the consumer for the
 *    command and endpoint rings; it generates events on the event ring
 *    for these.
 *
 * A sketch of these checks in code follows this comment block.
 */
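
/*
 * Illustrative sketch, not part of the driver: with the rules above, TRB
 * ownership and the empty/full checks reduce to a few comparisons.  The
 * xhci_ring field names are those from xhci.h; next_enqueue() is a
 * hypothetical helper standing in for "advance enqueue, following link
 * TRBs" as described in ring behavior rule 2.
 *
 *	owned_by_consumer = (le32_to_cpu(trb->generic.field[3]) & TRB_CYCLE)
 *			    == ring->cycle_state;
 *	ring_empty = (ring->enqueue == ring->dequeue);
 *	ring_full  = (next_enqueue(ring) == ring->dequeue);
 */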

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-mtk.h"

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}
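
/*
 * Illustrative sketch, not part of the driver: callers typically use
 * xhci_trb_virt_to_dma() to match a driver-side TRB against a DMA address
 * reported by the controller; event_dma below is a hypothetical value
 * pulled out of an event TRB.
 *
 *	dma_addr_t dma = xhci_trb_virt_to_dma(seg, trb);
 *	if (dma && dma == event_dma)
 *		// trb lives in this segment at that DMA address
 */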

static bool trb_is_link(union xhci_trb *trb)
{
	return TRB_TYPE_LINK_LE32(trb->link.control);
}

static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
{
	return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
}

static bool last_trb_on_ring(struct xhci_ring *ring,
			struct xhci_segment *seg, union xhci_trb *trb)
{
	return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
}

static bool link_trb_toggles_cycle(union xhci_trb *trb)
{
	return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the next
 * TRB is in a new segment.  This does not skip over link TRBs, and it does not
 * affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	if (trb_is_link(*trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	ring->deq_updates++;

	/* event ring doesn't have link trbs, check for last trb */
	if (ring->type == TYPE_EVENT) {
		if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
			ring->dequeue++;
			return;
		}
		if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
			ring->cycle_state ^= 1;
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		return;
	}

	/* All other rings have link trbs */
	if (!trb_is_link(ring->dequeue)) {
		ring->dequeue++;
		ring->num_trbs_free++;
	}
	while (trb_is_link(ring->dequeue)) {
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
	}
	return;
}
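
/*
 * Illustrative sketch, not part of the driver: an event ring consumer pairs
 * the cycle bit ownership check with inc_deq(), roughly:
 *
 *	while ((le32_to_cpu(ring->dequeue->event_cmd.flags) & TRB_CYCLE) ==
 *	       ring->cycle_state) {
 *		handle_event(ring->dequeue);	// hypothetical handler
 *		inc_deq(xhci, ring);
 *	}
 */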

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.  This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
			bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;

	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
	/* If this is not event ring, there is one less usable TRB */
	if (!trb_is_link(ring->enqueue))
		ring->num_trbs_free--;
	next = ++(ring->enqueue);

	ring->enq_updates++;
	/* Update the enqueue pointer further if that was a link TRB */
	while (trb_is_link(next)) {

		/*
		 * If the caller doesn't plan on enqueueing more TDs before
		 * ringing the doorbell, then we don't want to give the link TRB
		 * to the hardware just yet. We'll give the link TRB back in
		 * prepare_ring() just before we enqueue the TD at the top of
		 * the ring.
		 */
		if (!chain && !more_trbs_coming)
			break;

		/* If we're not dealing with 0.95 hardware or isoc rings on
		 * AMD 0.96 host, carry over the chain bit of the previous TRB
		 * (which may mean the chain bit is cleared).
		 */
		if (!(ring->type == TYPE_ISOC &&
		      (xhci->quirks & XHCI_AMD_0x96_HOST)) &&
		    !xhci_link_trb_quirk(xhci)) {
			next->link.control &= cpu_to_le32(~TRB_CHAIN);
			next->link.control |= cpu_to_le32(chain);
		}
		/* Give this link TRB to the hardware */
		wmb();
		next->link.control ^= cpu_to_le32(TRB_CYCLE);

		/* Toggle the cycle bit after the last ring segment. */
		if (link_trb_toggles_cycle(next))
			ring->cycle_state ^= 1;

		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
}
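
/*
 * Illustrative sketch, not part of the driver: a producer fills in the TRB
 * at ring->enqueue, writes the control word carrying the cycle bit in the
 * last field so the consumer never sees a half-written TRB, then advances
 * with inc_enq():
 *
 *	trb->generic.field[3] = cpu_to_le32(control | ring->cycle_state);
 *	inc_enq(xhci, ring, more_trbs_coming);
 *
 * control is a hypothetical value holding the TRB type and flags.
 */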

/*
 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 * enqueue pointer will not advance into dequeue segment.  See rules above.
 */
static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int num_trbs_in_deq_seg;

	if (ring->num_trbs_free < num_trbs)
		return 0;

	if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
		num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
		if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
			return 0;
	}

	return 1;
}
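
/*
 * Illustrative sketch, not part of the driver: producers gate every enqueue
 * on this check before writing TRBs (ring expansion on failure is omitted
 * here):
 *
 *	if (!room_on_ring(xhci, ep_ring, num_trbs))
 *		return -ENOMEM;		// or try to expand the ring first
 */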

/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
		return;

	xhci_dbg(xhci, "// Ding dong!\n");
	writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	readl(&xhci->dba->doorbell[0]);
}

static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
{
	u64 temp_64;
	int ret;

	xhci_dbg(xhci, "Abort command ring\n");

	temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;

	/*
	 * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
	 * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
	 * but the completion event is never sent. Use the cmd timeout timer to
	 * handle those cases. Use twice the time to cover the bit polling retry
	 */
	mod_timer(&xhci->cmd_timer, jiffies + (2 * XHCI_CMD_DEFAULT_TIMEOUT));
	xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
			&xhci->op_regs->cmd_ring);

	/* Section 4.6.1.2 of xHCI 1.0 spec says software should
	 * time the completion of all xHCI commands, including
	 * the Command Abort operation. If software doesn't see
	 * CRR negated in a timely manner (e.g. longer than 5
	 * seconds), then it should assume that there are
	 * larger problems with the xHC and assert HCRST.
	 */
	ret = xhci_handshake(&xhci->op_regs->cmd_ring,
			CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
	if (ret < 0) {
		/* we are about to kill xhci, give it one more chance */
		xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
			      &xhci->op_regs->cmd_ring);
		udelay(1000);
		ret = xhci_handshake(&xhci->op_regs->cmd_ring,
				     CMD_RING_RUNNING, 0, 3 * 1000 * 1000);
		if (ret == 0)
			return 0;

		xhci_err(xhci, "Stopping the command ring failed, "
				"maybe the host is dead\n");
		del_timer(&xhci->cmd_timer);
		xhci->xhc_state |= XHCI_STATE_DYING;
		xhci_halt(xhci);
		return -ESHUTDOWN;
	}

	return 0;
}

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index,
		unsigned int stream_id)
{
	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	unsigned int ep_state = ep->ep_state;

	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 * We don't want to restart any stream rings if there's a set dequeue
	 * pointer command pending because the device can choose to start any
	 * stream once the endpoint is on the HW schedule.
	 */
	if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
	    (ep_state & EP_HALTED))
		return;
	writel(DB_VALUE(ep_index, stream_id), db_addr);
	/* The CPU has better things to do at this point than wait for a
	 * write-posting flush.  It'll get there soon enough.
	 */
}

/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* A ring has pending URBs if its TD list is not empty */
	if (!(ep->ep_state & EP_HAS_STREAMS)) {
		if (ep->ring && !(list_empty(&ep->ring->td_list)))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
		return;
	}

	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
			stream_id++) {
		struct xhci_stream_info *stream_info = ep->stream_info;
		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
						stream_id);
	}
}

/* Get the right ring for the given slot_id, ep_index and stream_id.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}
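
/*
 * Illustrative sketch, not part of the driver: callers that start from an
 * URB (see the xhci_urb_to_transfer_ring() uses further down) reduce to
 * this helper by pulling the triad out of the URB:
 *
 *	return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
 *			xhci_get_endpoint_index(&urb->ep->desc),
 *			urb->stream_id);
 */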

/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 *
 * Some of the uses of xhci_generic_trb are grotty, but if they're done
 * with correct __le32 accesses they should work fine.  Only users of this are
 * in here.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *cur_td,
		struct xhci_dequeue_state *state)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_virt_ep *ep = &dev->eps[ep_index];
	struct xhci_ring *ep_ring;
	struct xhci_segment *new_seg;
	union xhci_trb *new_deq;
	dma_addr_t addr;
	u64 hw_dequeue;
	bool cycle_found = false;
	bool td_last_trb_found = false;

	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
			ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN can't find new dequeue state "
				"for invalid stream ID %u.\n",
				stream_id);
		return;
	}

	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Finding endpoint context");
	/* 4.6.9 the css flag is written to the stream context for streams */
	if (ep->ep_state & EP_HAS_STREAMS) {
		struct xhci_stream_ctx *ctx =
			&ep->stream_info->stream_ctx_array[stream_id];
		hw_dequeue = le64_to_cpu(ctx->stream_ring);
	} else {
		struct xhci_ep_ctx *ep_ctx
			= xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
		hw_dequeue = le64_to_cpu(ep_ctx->deq);
	}

	new_seg = ep_ring->deq_seg;
	new_deq = ep_ring->dequeue;
	state->new_cycle_state = hw_dequeue & 0x1;

	/*
	 * We want to find the pointer, segment and cycle state of the new trb
	 * (the one after current TD's last_trb). We know the cycle state at
	 * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
	 * found.
	 */
	do {
		if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
		    == (dma_addr_t)(hw_dequeue & ~0xf)) {
			cycle_found = true;
			if (td_last_trb_found)
				break;
		}
		if (new_deq == cur_td->last_trb)
			td_last_trb_found = true;

		if (cycle_found && trb_is_link(new_deq) &&
		    link_trb_toggles_cycle(new_deq))
			state->new_cycle_state ^= 0x1;

		next_trb(xhci, ep_ring, &new_seg, &new_deq);

		/* Search wrapped around, bail out */
		if (new_deq == ep->ring->dequeue) {
			xhci_err(xhci, "Error: Failed finding new dequeue state\n");
			state->new_deq_seg = NULL;
			state->new_deq_ptr = NULL;
			return;
		}

	} while (!cycle_found || !td_last_trb_found);

	state->new_deq_seg = new_seg;
	state->new_deq_ptr = new_deq;

	/* Don't update the ring cycle state for the producer (us). */
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Cycle state = 0x%x", state->new_cycle_state);

	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"New dequeue segment = %p (virtual)",
			state->new_deq_seg);
	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"New dequeue pointer = 0x%llx (DMA)",
			(unsigned long long) addr);
}

/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
 */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		struct xhci_td *cur_td, bool flip_cycle)
{
	struct xhci_segment *cur_seg;
	union xhci_trb *cur_trb;

	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
			true;
			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
		if (trb_is_link(cur_trb)) {
			/* Unchain any chained Link TRBs, but
			 * leave the pointers intact.
			 */
			cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
			/* Flip the cycle bit (link TRBs can't be the first
			 * or last TRB).
			 */
			if (flip_cycle)
				cur_trb->generic.field[3] ^=
					cpu_to_le32(TRB_CYCLE);
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Cancel (unchain) link TRB");
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Address = %p (0x%llx dma); "
					"in seg %p (0x%llx dma)",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		} else {
			cur_trb->generic.field[0] = 0;
			cur_trb->generic.field[1] = 0;
			cur_trb->generic.field[2] = 0;
			/* Preserve only the cycle bit of this TRB */
			cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
			/* Flip the cycle bit except on the first or last TRB */
			if (flip_cycle && cur_trb != cur_td->first_trb &&
					cur_trb != cur_td->last_trb)
				cur_trb->generic.field[3] ^=
					cpu_to_le32(TRB_CYCLE);
			cur_trb->generic.field[3] |= cpu_to_le32(
				TRB_TYPE(TRB_TR_NOOP));
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"TRB to noop at offset 0x%llx",
					(unsigned long long)
					xhci_trb_virt_to_dma(cur_seg, cur_trb));
		}
		if (cur_trb == cur_td->last_trb)
			break;
	}
}

static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	ep->ep_state &= ~EP_HALT_PENDING;
	/* Can't del_timer_sync in interrupt, so we attempt to cancel.  If the
	 * timer is running on another CPU, we don't decrement stop_cmds_pending
	 * (since we didn't successfully stop the watchdog timer).
	 */
	if (del_timer(&ep->stop_cmd_timer))
		ep->stop_cmds_pending--;
}

/* Must be called with xhci->lock held in interrupt context */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
		struct xhci_td *cur_td, int status)
{
	struct usb_hcd *hcd;
	struct urb *urb;
	struct urb_priv *urb_priv;

	urb = cur_td->urb;
	urb_priv = urb->hcpriv;
	urb_priv->td_cnt++;
	hcd = bus_to_hcd(urb->dev->bus);

	/* Only giveback urb when this is the last td in urb */
	if (urb_priv->td_cnt == urb_priv->length) {
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
			if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
				if (xhci->quirks & XHCI_AMD_PLL_FIX)
					usb_amd_quirk_pll_enable();
			}
		}
		usb_hcd_unlink_urb_from_ep(hcd, urb);

		spin_unlock(&xhci->lock);
		usb_hcd_giveback_urb(hcd, urb, status);
		xhci_urb_free_priv(urb_priv);
		spin_lock(&xhci->lock);
	}
}

void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, struct xhci_ring *ring,
				 struct xhci_td *td)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	struct xhci_segment *seg = td->bounce_seg;
	struct urb *urb = td->urb;

	if (!seg || !urb)
		return;

	if (usb_urb_dir_out(urb)) {
		dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
				 DMA_TO_DEVICE);
		return;
	}

	/* for in transfers we need to copy the data from bounce to sg */
	sg_pcopy_from_buffer(urb->sg, urb->num_mapped_sgs, seg->bounce_buf,
			     seg->bounce_len, seg->bounce_offs);
	dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
			 DMA_FROM_DEVICE);
	seg->bounce_len = 0;
	seg->bounce_offs = 0;
}

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 * 1. If the HW was in the middle of processing the TD that needs to be
 *    cancelled, then we must move the ring's dequeue pointer past the last TRB
 *    in the TD with a Set Dequeue Pointer Command.
 * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
 *    bit cleared) so that the HW will skip over them.
 */
static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, struct xhci_event_cmd *event)
{
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct list_head *entry;
	struct xhci_td *cur_td = NULL;
	struct xhci_td *last_unlinked_td;

	struct xhci_dequeue_state deq_state;

	if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
		if (!xhci->devs[slot_id])
			xhci_warn(xhci, "Stop endpoint command "
				"completion for disabled slot %u\n",
				slot_id);
		return;
	}

	memset(&deq_state, 0, sizeof(deq_state));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	ep = &xhci->devs[slot_id]->eps[ep_index];

	if (list_empty(&ep->cancelled_td_list)) {
		xhci_stop_watchdog_timer_in_irq(xhci, ep);
		ep->stopped_td = NULL;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}

	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it.  We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes
	 */
	list_for_each(entry, &ep->cancelled_td_list) {
		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Removing canceled TD starting at 0x%llx (dma).",
				(unsigned long long)xhci_trb_virt_to_dma(
					cur_td->start_seg, cur_td->first_trb));
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (!ep_ring) {
			/* This shouldn't happen unless a driver is mucking
			 * with the stream ID after submission.  This will
			 * leave the TD on the hardware ring, and the hardware
			 * will try to execute it, and may access a buffer
			 * that has already been freed.  In the best case, the
			 * hardware will execute it, and the event handler will
			 * ignore the completion event for that TD, since it was
			 * removed from the td_list for that endpoint.  In
			 * short, don't muck with the stream ID after
			 * submission.
			 */
			xhci_warn(xhci, "WARN Cancelled URB %p "
					"has invalid stream ID %u.\n",
					cur_td->urb,
					cur_td->urb->stream_id);
			goto remove_finished_td;
		}
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
		if (cur_td == ep->stopped_td)
			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
					cur_td->urb->stream_id,
					cur_td, &deq_state);
		else
			td_to_noop(xhci, ep_ring, cur_td, false);
remove_finished_td:
		/*
		 * The event handler won't see a completion for this TD anymore,
		 * so remove it from the endpoint ring's TD list.  Keep it in
		 * the cancelled TD list for URB completion later.
		 */
		list_del_init(&cur_td->td_list);
	}
	last_unlinked_td = cur_td;
	xhci_stop_watchdog_timer_in_irq(xhci, ep);

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
				ep->stopped_td->urb->stream_id, &deq_state);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise ring the doorbell(s) to restart queued transfers */
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}

	ep->stopped_td = NULL;

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
	do {
		cur_td = list_entry(ep->cancelled_td_list.next,
				struct xhci_td, cancelled_td_list);
		list_del_init(&cur_td->cancelled_td_list);

		/* Clean up the cancelled URB */
		/* Doesn't matter what we pass for status, since the core will
		 * just overwrite it (because the URB has been unlinked).
		 */
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (ep_ring && cur_td->bounce_seg)
			xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td);
		xhci_giveback_urb_in_irq(xhci, cur_td, 0);

		/* Stop processing the cancelled list if the watchdog timer is
		 * running.
		 */
		if (xhci->xhc_state & XHCI_STATE_DYING)
			return;
	} while (cur_td != last_unlinked_td);

	/* Return to the event handler with xhci->lock re-acquired */
}

static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_td *cur_td;

	while (!list_empty(&ring->td_list)) {
		cur_td = list_first_entry(&ring->td_list,
				struct xhci_td, td_list);
		list_del_init(&cur_td->td_list);
		if (!list_empty(&cur_td->cancelled_td_list))
			list_del_init(&cur_td->cancelled_td_list);

		if (cur_td->bounce_seg)
			xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);
		xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
		int slot_id, int ep_index)
{
	struct xhci_td *cur_td;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ring;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & EP_HAS_STREAMS) ||
			(ep->ep_state & EP_GETTING_NO_STREAMS)) {
		int stream_id;

		for (stream_id = 0; stream_id < ep->stream_info->num_streams;
				stream_id++) {
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Killing URBs for slot ID %u, ep index %u, stream %u",
					slot_id, ep_index, stream_id + 1);
			xhci_kill_ring_urbs(xhci,
					ep->stream_info->stream_rings[stream_id]);
		}
	} else {
		ring = ep->ring;
		if (!ring)
			return;
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Killing URBs for slot ID %u, ep index %u",
				slot_id, ep_index);
		xhci_kill_ring_urbs(xhci, ring);
	}
	while (!list_empty(&ep->cancelled_td_list)) {
		cur_td = list_first_entry(&ep->cancelled_td_list,
				struct xhci_td, cancelled_td_list);
		list_del_init(&cur_td->cancelled_td_list);
		xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead.  The
 * host may still be completing some other events, so we have to be careful to
 * let the event ring handler and the URB dequeueing/enqueueing functions know
 * through xhci->state.
 *
 * The timer may also fire if the host takes a very long time to respond to the
 * command, and the stop endpoint command completion handler cannot delete the
 * timer before the timer function is called.  Another endpoint cancellation may
 * sneak in before the timer function can grab the lock, and that may queue
 * another stop endpoint command and add the timer back.  So we cannot use a
 * simple flag to say whether there is a pending stop endpoint command for a
 * particular endpoint.
 *
 * Instead we use a combination of that flag and a counter for the number of
 * pending stop endpoint commands.  If the timer is the tail end of the last
 * stop endpoint command, and the endpoint's command is still pending, we assume
 * the host is dying.
 */
void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_ep *ep;
	int ret, i, j;
	unsigned long flags;

	ep = (struct xhci_virt_ep *) arg;
	xhci = ep->xhci;

	spin_lock_irqsave(&xhci->lock, flags);

	ep->stop_cmds_pending--;
	if (xhci->xhc_state & XHCI_STATE_REMOVING) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Stop EP timer ran, but another timer marked "
				"xHCI as DYING, exiting.");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}
	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Stop EP timer ran, but no command pending, "
				"exiting.");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
	xhci_warn(xhci, "Assuming host is dying, halting host.\n");
	/* Oops, HC is dead or dying or at least not responding to the stop
	 * endpoint command.
	 */
	xhci->xhc_state |= XHCI_STATE_DYING;
	/* Disable interrupts from the host controller and start halting it */
	xhci_quiesce(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	ret = xhci_halt(xhci);

	spin_lock_irqsave(&xhci->lock, flags);
	if (ret < 0) {
		/* This is bad; the host is not responding to commands and it's
		 * not allowing itself to be halted.  At least interrupts are
		 * disabled.  If we call usb_hc_died(), it will attempt to
		 * disconnect all device drivers under this host.  Those
		 * disconnect() methods will wait for all URBs to be unlinked,
		 * so we must complete them.
		 */
		xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
		xhci_warn(xhci, "Completing active URBs anyway.\n");
		/* We could turn all TDs on the rings to no-ops.  This won't
		 * help if the host has cached part of the ring, and is slow if
		 * we want to preserve the cycle bit.  Skip it and hope the host
		 * doesn't touch the memory.
		 */
	}
	for (i = 0; i < MAX_HC_SLOTS; i++) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; j++)
			xhci_kill_endpoint_urbs(xhci, i, j);
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Calling usb_hc_died()");
	usb_hc_died(xhci_to_hcd(xhci));
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"xHCI host controller is dead.");
}

static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_virt_device *dev,
		struct xhci_ring *ep_ring,
		unsigned int ep_index)
{
	union xhci_trb *dequeue_temp;
	int num_trbs_free_temp;
	bool revert = false;

	num_trbs_free_temp = ep_ring->num_trbs_free;
	dequeue_temp = ep_ring->dequeue;

	/* If we get two back-to-back stalls, and the first stalled transfer
	 * ends just before a link TRB, the dequeue pointer will be left on
	 * the link TRB by the code in the while loop.  So we have to update
	 * the dequeue pointer one segment further, or we'll jump off
	 * the segment into la-la-land.
	 */
	if (trb_is_link(ep_ring->dequeue)) {
		ep_ring->deq_seg = ep_ring->deq_seg->next;
		ep_ring->dequeue = ep_ring->deq_seg->trbs;
	}

	while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
		/* We have more usable TRBs */
		ep_ring->num_trbs_free++;
		ep_ring->dequeue++;
		if (trb_is_link(ep_ring->dequeue)) {
			if (ep_ring->dequeue ==
					dev->eps[ep_index].queued_deq_ptr)
				break;
			ep_ring->deq_seg = ep_ring->deq_seg->next;
			ep_ring->dequeue = ep_ring->deq_seg->trbs;
		}
		if (ep_ring->dequeue == dequeue_temp) {
			revert = true;
			break;
		}
	}

	if (revert) {
		xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
		ep_ring->num_trbs_free = num_trbs_free_temp;
	}
}

/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again.  We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, u32 cmd_comp_code)
{
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_ring *ep_ring;
	struct xhci_virt_device *dev;
	struct xhci_virt_ep *ep;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
	dev = xhci->devs[slot_id];
	ep = &dev->eps[ep_index];

	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
				stream_id);
		/* XXX: Harmless??? */
		goto cleanup;
	}

	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);

	if (cmd_comp_code != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		switch (cmd_comp_code) {
		case COMP_TRB_ERR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
			break;
		case COMP_CTX_STATE:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
			ep_state = le32_to_cpu(ep_ctx->ep_info);
			ep_state &= EP_STATE_MASK;
			slot_state = le32_to_cpu(slot_ctx->dev_state);
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Slot state = %u, EP state = %u",
					slot_state, ep_state);
			break;
		case COMP_EBADSLT:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
					slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
					cmd_comp_code);
			break;
		}
		/* OK what do we do now?  The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing and endpoint state is correct.  This might happen
		 * if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
	} else {
		u64 deq;
		/* 4.6.10 deq ptr is written to the stream ctx for streams */
		if (ep->ep_state & EP_HAS_STREAMS) {
			struct xhci_stream_ctx *ctx =
				&ep->stream_info->stream_ctx_array[stream_id];
			deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
		} else {
			deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
		if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
					 ep->queued_deq_ptr) == deq) {
			/* Update the ring's dequeue segment and dequeue pointer
			 * to reflect the new position.
			 */
			update_ring_for_set_deq_completion(xhci, dev,
				ep_ring, ep_index);
		} else {
			xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
			xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
				  ep->queued_deq_seg, ep->queued_deq_ptr);
		}
	}

cleanup:
	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
	dev->eps[ep_index].queued_deq_seg = NULL;
	dev->eps[ep_index].queued_deq_ptr = NULL;
	/* Restart any rings with pending URBs */
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, u32 cmd_comp_code)
{
	unsigned int ep_index;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	/* This command will only fail if the endpoint wasn't halted,
	 * but we don't care.
	 */
	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
		"Ignoring reset ep completion code of %u", cmd_comp_code);

	/* HW with the reset endpoint quirk needs to have a configure endpoint
	 * command complete before the endpoint can be used.  Queue that here
	 * because the HW can't handle two commands being queued in a row.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
		struct xhci_command *command;
		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
		if (!command) {
			xhci_warn(xhci, "WARN Cannot submit cfg ep: ENOMEM\n");
			return;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Queueing configure endpoint command");
		xhci_queue_configure_endpoint(xhci, command,
				xhci->devs[slot_id]->in_ctx->dma, slot_id,
				false);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Clear our internal halted state */
		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
	}
}

static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
		u32 cmd_comp_code)
{
	if (cmd_comp_code == COMP_SUCCESS)
		xhci->slot_id = slot_id;
	else
		xhci->slot_id = 0;
}

static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *virt_dev;

	virt_dev = xhci->devs[slot_id];
	if (!virt_dev)
		return;
	if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
		/* Delete default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, true);
	xhci_free_virt_device(xhci, slot_id);
}

static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event, u32 cmd_comp_code)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	unsigned int ep_state;
	u32 add_flags, drop_flags;

	/*
	 * Configure endpoint commands can come from the USB core
	 * configuration or alt setting changes, or because the HW
	 * needed an extra configure endpoint command after a reset
	 * endpoint command or streams were being configured.
	 * If the command was for a halted endpoint, the xHCI driver
	 * is not waiting on the configure endpoint command.
	 */
	virt_dev = xhci->devs[slot_id];
	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "Could not get input context, bad type.\n");
		return;
	}

	add_flags = le32_to_cpu(ctrl_ctx->add_flags);
	drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
	/* Input ctx add_flags are the endpoint index plus one */
	ep_index = xhci_last_valid_endpoint(add_flags) - 1;

	/* A usb_set_interface() call directly after clearing a halted
	 * condition may race on this quirky hardware.  Not worth
	 * worrying about, since this is prototype hardware.  Not sure
	 * if this will work for streams, but streams support was
	 * untested on this prototype.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
			ep_index != (unsigned int) -1 &&
			add_flags - SLOT_FLAG == drop_flags) {
		ep_state = virt_dev->eps[ep_index].ep_state;
		if (!(ep_state & EP_HALTED))
			return;
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Completed config ep cmd - "
				"last ep index = %d, state = %d",
				ep_index, ep_state);
		/* Clear internal halted state and restart ring(s) */
		virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}
	return;
}
1170
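/*
 * Worked example for the quirk test above (illustration only, assuming
 * SLOT_FLAG is bit 0 and xhci_last_valid_endpoint() returns the highest
 * set flag number): add_flags = 0x9 (slot flag plus endpoint flag 3) and
 * drop_flags = 0x8 satisfy add_flags - SLOT_FLAG == drop_flags, meaning
 * one endpoint was both added and dropped, and ep_index = 3 - 1 = 2.
 */
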
Xenia Ragiadakouf6813212013-09-09 13:29:51 +03001171static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
1172 struct xhci_event_cmd *event)
1173{
Xenia Ragiadakouf6813212013-09-09 13:29:51 +03001174 xhci_dbg(xhci, "Completed reset device command.\n");
Mathias Nyman9ea18332014-05-08 19:26:02 +03001175 if (!xhci->devs[slot_id])
Xenia Ragiadakouf6813212013-09-09 13:29:51 +03001176 xhci_warn(xhci, "Reset device command completion "
1177 "for disabled slot %u\n", slot_id);
1178}
1179
Xenia Ragiadakou2c070822013-09-09 13:29:52 +03001180static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
1181 struct xhci_event_cmd *event)
1182{
1183 if (!(xhci->quirks & XHCI_NEC_HOST)) {
1184 xhci->error_bitmask |= 1 << 6;
1185 return;
1186 }
1187 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1188 "NEC firmware version %2x.%02x",
1189 NEC_FW_MAJOR(le32_to_cpu(event->status)),
1190 NEC_FW_MINOR(le32_to_cpu(event->status)));
1191}
1192
Mathias Nyman9ea18332014-05-08 19:26:02 +03001193static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
Mathias Nymanc9aa1a22014-05-08 19:26:01 +03001194{
1195 list_del(&cmd->cmd_list);
Mathias Nyman9ea18332014-05-08 19:26:02 +03001196
1197 if (cmd->completion) {
1198 cmd->status = status;
1199 complete(cmd->completion);
1200 } else {
Mathias Nymanc9aa1a22014-05-08 19:26:01 +03001201 kfree(cmd);
Mathias Nyman9ea18332014-05-08 19:26:02 +03001202 }
Mathias Nymanc9aa1a22014-05-08 19:26:01 +03001203}
1204
1205void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
1206{
1207 struct xhci_command *cur_cmd, *tmp_cmd;
1208 list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
Mathias Nyman9ea18332014-05-08 19:26:02 +03001209 xhci_complete_del_and_free_cmd(cur_cmd, COMP_CMD_ABORT);
Mathias Nymanc9aa1a22014-05-08 19:26:01 +03001210}
1211
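/*
 * Sketch of the waiter side (an illustration, not code from this driver;
 * in_ctx_dma stands in for the device's input context DMA address): a
 * command submitted with a completion sees the COMP_CMD_ABORT status that
 * xhci_cleanup_command_queue() passed to xhci_complete_del_and_free_cmd():
 *
 *	cmd = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
 *	if (!cmd)
 *		return -ENOMEM;
 *	xhci_queue_configure_endpoint(xhci, cmd, in_ctx_dma, slot_id, false);
 *	xhci_ring_cmd_db(xhci);
 *	wait_for_completion(cmd->completion);
 *	if (cmd->status == COMP_CMD_ABORT)
 *		ret = -ESHUTDOWN;
 *	kfree(cmd);
 *
 * Note the waiter frees the command itself; with a completion attached,
 * xhci_complete_del_and_free_cmd() only records the status and wakes it.
 */
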
Mathias Nymanc311e392014-05-08 19:26:03 +03001212/*
1213 * Turn all commands on the command ring with status set to "aborted" into no-op TRBs.
1214 * If there are other commands waiting then restart the ring and kick the timer.
1215 * This must be called with command ring stopped and xhci->lock held.
1216 */
1217static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
1218 struct xhci_command *cur_cmd)
1219{
1220 struct xhci_command *i_cmd, *tmp_cmd;
1221 u32 cycle_state;
1222
1223 /* Turn all aborted commands in list to no-ops, then restart */
1224 list_for_each_entry_safe(i_cmd, tmp_cmd, &xhci->cmd_list,
1225 cmd_list) {
1226
1227 if (i_cmd->status != COMP_CMD_ABORT)
1228 continue;
1229
1230 i_cmd->status = COMP_CMD_STOP;
1231
1232 xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
1233 i_cmd->command_trb);
1234 /* get cycle state from the original cmd trb */
1235 cycle_state = le32_to_cpu(
1236 i_cmd->command_trb->generic.field[3]) & TRB_CYCLE;
1237 /* modify the command trb to no-op command */
1238 i_cmd->command_trb->generic.field[0] = 0;
1239 i_cmd->command_trb->generic.field[1] = 0;
1240 i_cmd->command_trb->generic.field[2] = 0;
1241 i_cmd->command_trb->generic.field[3] = cpu_to_le32(
1242 TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
1243
1244 /*
1245 * Callers waiting on a completion are woken when the command
1246 * completion event is received for these no-op commands.
1247 */
1248 }
1249
1250 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
1251
1252 /* ring command ring doorbell to restart the command ring */
1253 if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
1254 !(xhci->xhc_state & XHCI_STATE_DYING)) {
1255 xhci->current_cmd = cur_cmd;
1256 mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
1257 xhci_ring_cmd_db(xhci);
1258 }
1259 return;
1260}
1261
1262
1263void xhci_handle_command_timeout(unsigned long data)
1264{
1265 struct xhci_hcd *xhci;
1266 int ret;
1267 unsigned long flags;
1268 u64 hw_ring_state;
Mathias Nyman3425aa02016-06-01 18:09:08 +03001269 bool second_timeout = false;
Mathias Nymanc311e392014-05-08 19:26:03 +03001270 xhci = (struct xhci_hcd *) data;
1271
1272 /* mark this command to be cancelled */
1273 spin_lock_irqsave(&xhci->lock, flags);
1274 if (xhci->current_cmd) {
Mathias Nyman3425aa02016-06-01 18:09:08 +03001275 if (xhci->current_cmd->status == COMP_CMD_ABORT)
1276 second_timeout = true;
1277 xhci->current_cmd->status = COMP_CMD_ABORT;
Mathias Nymanc311e392014-05-08 19:26:03 +03001278 }
1279
Mathias Nymanc311e392014-05-08 19:26:03 +03001280 /* Make sure command ring is running before aborting it */
1281 hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
1282 if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
1283 (hw_ring_state & CMD_RING_RUNNING)) {
Mathias Nymanc311e392014-05-08 19:26:03 +03001284 spin_unlock_irqrestore(&xhci->lock, flags);
1285 xhci_dbg(xhci, "Command timeout\n");
1286 ret = xhci_abort_cmd_ring(xhci);
1287 if (unlikely(ret == -ESHUTDOWN)) {
1288 xhci_err(xhci, "Abort command ring failed\n");
1289 xhci_cleanup_command_queue(xhci);
1290 usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
1291 xhci_dbg(xhci, "xHCI host controller is dead.\n");
1292 }
1293 return;
1294 }
Mathias Nyman3425aa02016-06-01 18:09:08 +03001295
1296 /* command ring failed to restart, or host removed. Bail out */
1297 if (second_timeout || xhci->xhc_state & XHCI_STATE_REMOVING) {
1298 spin_unlock_irqrestore(&xhci->lock, flags);
1299 xhci_dbg(xhci, "command timed out twice, ring start fail?\n");
1300 xhci_cleanup_command_queue(xhci);
1301 return;
1302 }
1303
Mathias Nymanc311e392014-05-08 19:26:03 +03001304 /* command timeout on stopped ring, ring can't be aborted */
1305 xhci_dbg(xhci, "Command timeout on stopped ring\n");
1306 xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
1307 spin_unlock_irqrestore(&xhci->lock, flags);
1308 return;
1309}
1310
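/*
 * For context (a hedged sketch, not code from this file): the timer
 * handled above is expected to be armed by the command-queueing path
 * when a command lands on an empty list, along the lines of:
 *
 *	if (list_empty(&xhci->cmd_list)) {
 *		xhci->current_cmd = cmd;
 *		mod_timer(&xhci->cmd_timer,
 *			  jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
 *	}
 *	list_add_tail(&cmd->cmd_list, &xhci->cmd_list);
 *
 * handle_cmd_completion() below re-arms it the same way whenever more
 * commands remain on the list.
 */
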
Sarah Sharp7f84eef2009-04-27 19:53:56 -07001311static void handle_cmd_completion(struct xhci_hcd *xhci,
1312 struct xhci_event_cmd *event)
1313{
Matt Evans28ccd292011-03-29 13:40:46 +11001314 int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
Sarah Sharp7f84eef2009-04-27 19:53:56 -07001315 u64 cmd_dma;
1316 dma_addr_t cmd_dequeue_dma;
Xenia Ragiadakoue7a79a12013-09-09 13:29:56 +03001317 u32 cmd_comp_code;
Xenia Ragiadakou9124b122013-09-09 13:29:57 +03001318 union xhci_trb *cmd_trb;
Mathias Nymanc9aa1a22014-05-08 19:26:01 +03001319 struct xhci_command *cmd;
Xenia Ragiadakoub54fc462013-09-09 13:29:58 +03001320 u32 cmd_type;
Sarah Sharp7f84eef2009-04-27 19:53:56 -07001321
Matt Evans28ccd292011-03-29 13:40:46 +11001322 cmd_dma = le64_to_cpu(event->cmd_trb);
Xenia Ragiadakou9124b122013-09-09 13:29:57 +03001323 cmd_trb = xhci->cmd_ring->dequeue;
Sarah Sharp23e3be12009-04-29 19:05:20 -07001324 cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
Xenia Ragiadakou9124b122013-09-09 13:29:57 +03001325 cmd_trb);
Sarah Sharp7f84eef2009-04-27 19:53:56 -07001326 /* Is the command ring deq ptr out of sync with the deq seg ptr? */
1327 if (cmd_dequeue_dma == 0) {
1328 xhci->error_bitmask |= 1 << 4;
1329 return;
1330 }
1331 /* Does the DMA address match our internal dequeue pointer address? */
1332 if (cmd_dma != (u64) cmd_dequeue_dma) {
1333 xhci->error_bitmask |= 1 << 5;
1334 return;
1335 }
Elric Fub63f4052012-06-27 16:55:43 +08001336
Mathias Nymanc9aa1a22014-05-08 19:26:01 +03001337 cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);
1338
Mathias Nymanc311e392014-05-08 19:26:03 +03001339 del_timer(&xhci->cmd_timer);
1340
Xenia Ragiadakou9124b122013-09-09 13:29:57 +03001341 trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
Xenia Ragiadakou63a23b9a2013-08-06 07:52:48 +03001342
Xenia Ragiadakoue7a79a12013-09-09 13:29:56 +03001343 cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
Mathias Nymanc311e392014-05-08 19:26:03 +03001344
1345 /* If the command ring stopped, we own the TRBs between enqueue and dequeue */
1346 if (cmd_comp_code == COMP_CMD_STOP) {
1347 xhci_handle_stopped_cmd_ring(xhci, cmd);
1348 return;
1349 }
Mathias Nyman33be1262016-08-16 10:18:03 +03001350
1351 if (cmd->command_trb != xhci->cmd_ring->dequeue) {
1352 xhci_err(xhci,
1353 "Command completion event does not match command\n");
1354 return;
1355 }
1356
Mathias Nymanc311e392014-05-08 19:26:03 +03001357 /*
1358 * The host aborted the command ring; check whether the current command
1359 * was supposed to be aborted, otherwise continue normally.
1360 * The command ring is stopped now, but the xHC will issue a Command
1361 * Ring Stopped event which will cause us to restart it.
1362 */
1363 if (cmd_comp_code == COMP_CMD_ABORT) {
1364 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
1365 if (cmd->status == COMP_CMD_ABORT)
1366 goto event_handled;
Elric Fub63f4052012-06-27 16:55:43 +08001367 }
1368
Xenia Ragiadakoub54fc462013-09-09 13:29:58 +03001369 cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
1370 switch (cmd_type) {
1371 case TRB_ENABLE_SLOT:
Xenia Ragiadakoue7a79a12013-09-09 13:29:56 +03001372 xhci_handle_cmd_enable_slot(xhci, slot_id, cmd_comp_code);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07001373 break;
Xenia Ragiadakoub54fc462013-09-09 13:29:58 +03001374 case TRB_DISABLE_SLOT:
Xenia Ragiadakou6c02dd12013-09-09 13:29:48 +03001375 xhci_handle_cmd_disable_slot(xhci, slot_id);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07001376 break;
Xenia Ragiadakoub54fc462013-09-09 13:29:58 +03001377 case TRB_CONFIG_EP:
Mathias Nyman9ea18332014-05-08 19:26:02 +03001378 if (!cmd->completion)
1379 xhci_handle_cmd_config_ep(xhci, slot_id, event,
1380 cmd_comp_code);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001381 break;
Xenia Ragiadakoub54fc462013-09-09 13:29:58 +03001382 case TRB_EVAL_CONTEXT:
Sarah Sharp2d3f1fa2009-08-07 14:04:49 -07001383 break;
Xenia Ragiadakoub54fc462013-09-09 13:29:58 +03001384 case TRB_ADDR_DEV:
Sarah Sharp3ffbba92009-04-27 19:57:38 -07001385 break;
Xenia Ragiadakoub54fc462013-09-09 13:29:58 +03001386 case TRB_STOP_RING:
Xenia Ragiadakoub8200c92013-09-09 13:30:00 +03001387 WARN_ON(slot_id != TRB_TO_SLOT_ID(
1388 le32_to_cpu(cmd_trb->generic.field[3])));
1389 xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
Sarah Sharpae636742009-04-29 19:02:31 -07001390 break;
Xenia Ragiadakoub54fc462013-09-09 13:29:58 +03001391 case TRB_SET_DEQ:
Xenia Ragiadakoub8200c92013-09-09 13:30:00 +03001392 WARN_ON(slot_id != TRB_TO_SLOT_ID(
1393 le32_to_cpu(cmd_trb->generic.field[3])));
Xenia Ragiadakouc69a0592013-09-09 13:30:01 +03001394 xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
Sarah Sharpae636742009-04-29 19:02:31 -07001395 break;
Xenia Ragiadakoub54fc462013-09-09 13:29:58 +03001396 case TRB_CMD_NOOP:
Mathias Nymanc311e392014-05-08 19:26:03 +03001397 /* Is this an aborted command turned to NO-OP? */
1398 if (cmd->status == COMP_CMD_STOP)
1399 cmd_comp_code = COMP_CMD_STOP;
Sarah Sharp7f84eef2009-04-27 19:53:56 -07001400 break;
Xenia Ragiadakoub54fc462013-09-09 13:29:58 +03001401 case TRB_RESET_EP:
Xenia Ragiadakoub8200c92013-09-09 13:30:00 +03001402 WARN_ON(slot_id != TRB_TO_SLOT_ID(
1403 le32_to_cpu(cmd_trb->generic.field[3])));
Xenia Ragiadakouc69a0592013-09-09 13:30:01 +03001404 xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
Sarah Sharpa1587d92009-07-27 12:03:15 -07001405 break;
Xenia Ragiadakoub54fc462013-09-09 13:29:58 +03001406 case TRB_RESET_DEV:
Mathias Nyman6fcfb0d2014-06-24 17:14:40 +03001407 /* SLOT_ID field in reset device cmd completion event TRB is 0.
1408 * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
1409 */
1410 slot_id = TRB_TO_SLOT_ID(
1411 le32_to_cpu(cmd_trb->generic.field[3]));
Xenia Ragiadakouf6813212013-09-09 13:29:51 +03001412 xhci_handle_cmd_reset_dev(xhci, slot_id, event);
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08001413 break;
Xenia Ragiadakoub54fc462013-09-09 13:29:58 +03001414 case TRB_NEC_GET_FW:
Xenia Ragiadakou2c070822013-09-09 13:29:52 +03001415 xhci_handle_cmd_nec_get_fw(xhci, event);
Sarah Sharp02386342010-05-24 13:25:28 -07001416 break;
Sarah Sharp7f84eef2009-04-27 19:53:56 -07001417 default:
1418 /* Skip over unknown commands on the event ring */
1419 xhci->error_bitmask |= 1 << 6;
1420 break;
1421 }
Mathias Nymanc9aa1a22014-05-08 19:26:01 +03001422
Mathias Nymanc311e392014-05-08 19:26:03 +03001423 /* restart timer if this wasn't the last command */
1424 if (cmd->cmd_list.next != &xhci->cmd_list) {
1425 xhci->current_cmd = list_entry(cmd->cmd_list.next,
1426 struct xhci_command, cmd_list);
1427 mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
1428 }
1429
1430event_handled:
Mathias Nyman9ea18332014-05-08 19:26:02 +03001431 xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);
Mathias Nymanc9aa1a22014-05-08 19:26:01 +03001432
Andiry Xu3b72fca2012-03-05 17:49:32 +08001433 inc_deq(xhci, xhci->cmd_ring);
Sarah Sharp7f84eef2009-04-27 19:53:56 -07001434}
1435
Sarah Sharp02386342010-05-24 13:25:28 -07001436static void handle_vendor_event(struct xhci_hcd *xhci,
1437 union xhci_trb *event)
1438{
1439 u32 trb_type;
1440
Matt Evans28ccd292011-03-29 13:40:46 +11001441 trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
Sarah Sharp02386342010-05-24 13:25:28 -07001442 xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
1443 if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
1444 handle_cmd_completion(xhci, &event->event_cmd);
1445}
1446
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08001447/* @port_id: the one-based port ID from the hardware (indexed from array of all
1448 * port registers -- USB 3.0 and USB 2.0).
1449 *
1450 * Returns a zero-based port number, which is suitable for indexing into each of
1451 * the split roothubs' port arrays and bus state arrays.
Sarah Sharpd0cd5d42011-11-14 17:51:39 -08001452 * Add one to it in order to call xhci_find_slot_id_by_port.
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08001453 */
1454static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
1455 struct xhci_hcd *xhci, u32 port_id)
1456{
1457 unsigned int i;
1458 unsigned int num_similar_speed_ports = 0;
1459
1460 /* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
1461 * and usb2_ports are 0-based indexes. Count the number of similar
1462 * speed ports, up to 1 port before this port.
1463 */
1464 for (i = 0; i < (port_id - 1); i++) {
1465 u8 port_speed = xhci->port_array[i];
1466
1467 /*
1468 * Skip ports that don't have known speeds, or have duplicate
1469 * Extended Capabilities port speed entries.
1470 */
Dan Carpenter22e04872011-03-17 22:39:49 +03001471 if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08001472 continue;
1473
1474 /*
1475 * USB 3.0 ports are always under a USB 3.0 hub. USB 2.0 and
1476 * 1.1 ports are under the USB 2.0 hub. If the port speed
1477 * matches the device speed, it's a similar speed port.
1478 */
Mathias Nymanb50107b2015-10-01 18:40:38 +03001479 if ((port_speed == 0x03) == (hcd->speed >= HCD_USB3))
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08001480 num_similar_speed_ports++;
1481 }
1482 return num_similar_speed_ports;
1483}
1484
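/*
 * Worked example (illustration only): with xhci->port_array holding the
 * major revisions { 0x02, 0x03, 0x02 }, a hardware port_id of 3 seen by
 * the USB 2.0 roothub counts one earlier similar-speed port (index 0)
 * and skips the USB 3.0 port at index 1, so the faked port number is 1.
 */
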
Sarah Sharp623bef92011-11-11 14:57:33 -08001485static void handle_device_notification(struct xhci_hcd *xhci,
1486 union xhci_trb *event)
1487{
1488 u32 slot_id;
Sarah Sharp4ee823b2011-11-14 18:00:01 -08001489 struct usb_device *udev;
Sarah Sharp623bef92011-11-11 14:57:33 -08001490
Xenia Ragiadakou7e76ad42013-09-09 21:03:10 +03001491 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
Sarah Sharp4ee823b2011-11-14 18:00:01 -08001492 if (!xhci->devs[slot_id]) {
Sarah Sharp623bef92011-11-11 14:57:33 -08001493 xhci_warn(xhci, "Device Notification event for "
1494 "unused slot %u\n", slot_id);
Sarah Sharp4ee823b2011-11-14 18:00:01 -08001495 return;
1496 }
1497
1498 xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
1499 slot_id);
1500 udev = xhci->devs[slot_id]->udev;
1501 if (udev && udev->parent)
1502 usb_wakeup_notification(udev->parent, udev->portnum);
Sarah Sharp623bef92011-11-11 14:57:33 -08001503}
1504
Sarah Sharp0f2a7932009-04-27 19:57:12 -07001505static void handle_port_status(struct xhci_hcd *xhci,
1506 union xhci_trb *event)
1507{
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08001508 struct usb_hcd *hcd;
Sarah Sharp0f2a7932009-04-27 19:57:12 -07001509 u32 port_id;
Andiry Xu56192532010-10-14 07:23:00 -07001510 u32 temp, temp1;
Sarah Sharp518e8482010-12-15 11:56:29 -08001511 int max_ports;
Andiry Xu56192532010-10-14 07:23:00 -07001512 int slot_id;
Sarah Sharp5308a912010-12-01 11:34:59 -08001513 unsigned int faked_port_index;
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08001514 u8 major_revision;
Sarah Sharp20b67cf2010-12-15 12:47:14 -08001515 struct xhci_bus_state *bus_state;
Matt Evans28ccd292011-03-29 13:40:46 +11001516 __le32 __iomem **port_array;
Sarah Sharp386139d2011-03-24 08:02:58 -07001517 bool bogus_port_status = false;
Sarah Sharp0f2a7932009-04-27 19:57:12 -07001518
1519 /* Port status change events always have a successful completion code */
Matt Evans28ccd292011-03-29 13:40:46 +11001520 if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
Sarah Sharp0f2a7932009-04-27 19:57:12 -07001521 xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
1522 xhci->error_bitmask |= 1 << 8;
1523 }
Matt Evans28ccd292011-03-29 13:40:46 +11001524 port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
Sarah Sharp0f2a7932009-04-27 19:57:12 -07001525 xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
1526
Sarah Sharp518e8482010-12-15 11:56:29 -08001527 max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1528 if ((port_id <= 0) || (port_id > max_ports)) {
Andiry Xu56192532010-10-14 07:23:00 -07001529 xhci_warn(xhci, "Invalid port id %d\n", port_id);
Peter Chen09ce0c02013-03-20 09:30:00 +08001530 inc_deq(xhci, xhci->event_ring);
1531 return;
Andiry Xu56192532010-10-14 07:23:00 -07001532 }
1533
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08001534 /* Figure out which usb_hcd this port is attached to:
1535 * is it a USB 3.0 port or a USB 2.0/1.1 port?
1536 */
1537 major_revision = xhci->port_array[port_id - 1];
Peter Chen09ce0c02013-03-20 09:30:00 +08001538
1539 /* Find the right roothub. */
1540 hcd = xhci_to_hcd(xhci);
Mathias Nymanb50107b2015-10-01 18:40:38 +03001541 if ((major_revision == 0x03) != (hcd->speed >= HCD_USB3))
Peter Chen09ce0c02013-03-20 09:30:00 +08001542 hcd = xhci->shared_hcd;
1543
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08001544 if (major_revision == 0) {
1545 xhci_warn(xhci, "Event for port %u not in "
1546 "Extended Capabilities, ignoring.\n",
1547 port_id);
Sarah Sharp386139d2011-03-24 08:02:58 -07001548 bogus_port_status = true;
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08001549 goto cleanup;
1550 }
Dan Carpenter22e04872011-03-17 22:39:49 +03001551 if (major_revision == DUPLICATE_ENTRY) {
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08001552 xhci_warn(xhci, "Event for port %u duplicated in"
1553 "Extended Capabilities, ignoring.\n",
1554 port_id);
Sarah Sharp386139d2011-03-24 08:02:58 -07001555 bogus_port_status = true;
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08001556 goto cleanup;
Sarah Sharp5308a912010-12-01 11:34:59 -08001557 }
1558
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08001559 /*
1560 * Hardware port IDs reported by a Port Status Change Event include USB
1561 * 3.0 and USB 2.0 ports. We want to check if the port has reported a
1562 * resume event, but we first need to translate the hardware port ID
1563 * into the index into the ports on the correct split roothub, and the
1564 * correct bus_state structure.
1565 */
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08001566 bus_state = &xhci->bus_state[hcd_index(hcd)];
Mathias Nymanb50107b2015-10-01 18:40:38 +03001567 if (hcd->speed >= HCD_USB3)
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08001568 port_array = xhci->usb3_ports;
1569 else
1570 port_array = xhci->usb2_ports;
1571 /* Find the faked roothub port number */
1572 faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
1573 port_id);
1574
Xenia Ragiadakoub0ba9722013-11-15 05:34:06 +02001575 temp = readl(port_array[faked_port_index]);
Sarah Sharp7111ebc2010-12-14 13:24:55 -08001576 if (hcd->state == HC_STATE_SUSPENDED) {
Andiry Xu56192532010-10-14 07:23:00 -07001577 xhci_dbg(xhci, "resume root hub\n");
1578 usb_hcd_resume_root_hub(hcd);
1579 }
1580
Mathias Nymanb50107b2015-10-01 18:40:38 +03001581 if (hcd->speed >= HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
Zhuang Jin Canfac42712015-07-21 17:20:30 +03001582 bus_state->port_remote_wakeup &= ~(1 << faked_port_index);
1583
Andiry Xu56192532010-10-14 07:23:00 -07001584 if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
1585 xhci_dbg(xhci, "port resume event for port %d\n", port_id);
1586
Xenia Ragiadakoub0ba9722013-11-15 05:34:06 +02001587 temp1 = readl(&xhci->op_regs->command);
Andiry Xu56192532010-10-14 07:23:00 -07001588 if (!(temp1 & CMD_RUN)) {
1589 xhci_warn(xhci, "xHC is not running.\n");
1590 goto cleanup;
1591 }
1592
Mathias Nyman2338b9e2015-10-01 18:40:36 +03001593 if (DEV_SUPERSPEED_ANY(temp)) {
Sarah Sharpd93814c2012-01-24 16:39:02 -08001594 xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
Sarah Sharp4ee823b2011-11-14 18:00:01 -08001595 /* Set a flag to say the port signaled remote wakeup,
1596 * so we can tell the difference between the end of
1597 * device and host initiated resume.
1598 */
1599 bus_state->port_remote_wakeup |= 1 << faked_port_index;
Sarah Sharpd93814c2012-01-24 16:39:02 -08001600 xhci_test_and_clear_bit(xhci, port_array,
1601 faked_port_index, PORT_PLC);
Andiry Xuc9682df2011-09-23 14:19:48 -07001602 xhci_set_link_state(xhci, port_array, faked_port_index,
1603 XDEV_U0);
Sarah Sharpd93814c2012-01-24 16:39:02 -08001604 /* Need to wait until the next link state change
1605 * indicates the device is actually in U0.
1606 */
1607 bogus_port_status = true;
1608 goto cleanup;
Mathias Nymanf69115f2015-12-11 14:38:06 +02001609 } else if (!test_bit(faked_port_index,
1610 &bus_state->resuming_ports)) {
Andiry Xu56192532010-10-14 07:23:00 -07001611 xhci_dbg(xhci, "resume HS port %d\n", port_id);
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08001612 bus_state->resume_done[faked_port_index] = jiffies +
Felipe Balbib9e45182015-02-13 14:39:13 -06001613 msecs_to_jiffies(USB_RESUME_TIMEOUT);
Andiry Xuf370b992012-04-14 02:54:30 +08001614 set_bit(faked_port_index, &bus_state->resuming_ports);
Andiry Xu56192532010-10-14 07:23:00 -07001615 mod_timer(&hcd->rh_timer,
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08001616 bus_state->resume_done[faked_port_index]);
Andiry Xu56192532010-10-14 07:23:00 -07001617 /* Do the rest in GetPortStatus */
1618 }
1619 }
1620
Sarah Sharpd93814c2012-01-24 16:39:02 -08001621 if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
Mathias Nyman2338b9e2015-10-01 18:40:36 +03001622 DEV_SUPERSPEED_ANY(temp)) {
Sarah Sharpd93814c2012-01-24 16:39:02 -08001623 xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
Sarah Sharp4ee823b2011-11-14 18:00:01 -08001624 /* We've just brought the device into U0 through either the
1625 * Resume state after a device remote wakeup, or through the
1626 * U3Exit state after a host-initiated resume. If it's a device
1627 * initiated remote wake, don't pass up the link state change,
1628 * so the roothub behavior is consistent with external
1629 * USB 3.0 hub behavior.
1630 */
Sarah Sharpd93814c2012-01-24 16:39:02 -08001631 slot_id = xhci_find_slot_id_by_port(hcd, xhci,
1632 faked_port_index + 1);
1633 if (slot_id && xhci->devs[slot_id])
1634 xhci_ring_device(xhci, slot_id);
Nickolai Zeldovichba7b5c22013-01-07 22:39:31 -05001635 if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
Sarah Sharp4ee823b2011-11-14 18:00:01 -08001636 bus_state->port_remote_wakeup &=
1637 ~(1 << faked_port_index);
1638 xhci_test_and_clear_bit(xhci, port_array,
1639 faked_port_index, PORT_PLC);
1640 usb_wakeup_notification(hcd->self.root_hub,
1641 faked_port_index + 1);
1642 bogus_port_status = true;
1643 goto cleanup;
1644 }
Sarah Sharpd93814c2012-01-24 16:39:02 -08001645 }
1646
Sarah Sharp8b3d4572013-08-20 08:12:12 -07001647 /*
1648 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
1649 * RExit to a disconnect state). If so, let the driver know it's
1650 * out of the RExit state.
1651 */
Mathias Nyman2338b9e2015-10-01 18:40:36 +03001652 if (!DEV_SUPERSPEED_ANY(temp) &&
Sarah Sharp8b3d4572013-08-20 08:12:12 -07001653 test_and_clear_bit(faked_port_index,
1654 &bus_state->rexit_ports)) {
1655 complete(&bus_state->rexit_done[faked_port_index]);
1656 bogus_port_status = true;
1657 goto cleanup;
1658 }
1659
Mathias Nymanb50107b2015-10-01 18:40:38 +03001660 if (hcd->speed < HCD_USB3)
Andiry Xu6fd45622011-09-23 14:19:50 -07001661 xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
1662 PORT_PLC);
1663
Andiry Xu56192532010-10-14 07:23:00 -07001664cleanup:
Sarah Sharp0f2a7932009-04-27 19:57:12 -07001665 /* Update event ring dequeue pointer before dropping the lock */
Andiry Xu3b72fca2012-03-05 17:49:32 +08001666 inc_deq(xhci, xhci->event_ring);
Sarah Sharp0f2a7932009-04-27 19:57:12 -07001667
Sarah Sharp386139d2011-03-24 08:02:58 -07001668 /* Don't make the USB core poll the roothub if we got a bad port status
1669 * change event. Besides, at that point we can't tell which roothub
1670 * (USB 2.0 or USB 3.0) to kick.
1671 */
1672 if (bogus_port_status)
1673 return;
1674
Sarah Sharpc52804a2012-11-27 12:30:23 -08001675 /*
1676 * xHCI port-status-change events occur when the "or" of all the
1677 * status-change bits in the portsc register changes from 0 to 1.
1678 * New status changes won't cause an event if any other change
1679 * bits are still set. When an event occurs, switch over to
1680 * polling to avoid losing status changes.
1681 */
1682 xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
1683 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
Sarah Sharp0f2a7932009-04-27 19:57:12 -07001684 spin_unlock(&xhci->lock);
1685 /* Pass this up to the core */
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08001686 usb_hcd_poll_rh_status(hcd);
Sarah Sharp0f2a7932009-04-27 19:57:12 -07001687 spin_lock(&xhci->lock);
1688}
1689
1690/*
Sarah Sharpd0e96f52009-04-27 19:58:01 -07001691 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
1692 * at end_trb, which may be in another segment. If the suspect DMA address is a
1693 * TRB in this TD, this function returns that TRB's segment. Otherwise it
1694 * returns NULL.
1695 */
Hans de Goedecffb9be2014-08-20 16:41:51 +03001696struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
1697 struct xhci_segment *start_seg,
Sarah Sharpd0e96f52009-04-27 19:58:01 -07001698 union xhci_trb *start_trb,
1699 union xhci_trb *end_trb,
Hans de Goedecffb9be2014-08-20 16:41:51 +03001700 dma_addr_t suspect_dma,
1701 bool debug)
Sarah Sharpd0e96f52009-04-27 19:58:01 -07001702{
1703 dma_addr_t start_dma;
1704 dma_addr_t end_seg_dma;
1705 dma_addr_t end_trb_dma;
1706 struct xhci_segment *cur_seg;
1707
Sarah Sharp23e3be12009-04-29 19:05:20 -07001708 start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
Sarah Sharpd0e96f52009-04-27 19:58:01 -07001709 cur_seg = start_seg;
1710
1711 do {
Sarah Sharp2fa88da2009-11-03 22:02:24 -08001712 if (start_dma == 0)
Randy Dunlap326b4812010-04-19 08:53:50 -07001713 return NULL;
Sarah Sharpae636742009-04-29 19:02:31 -07001714 /* We may get an event for a Link TRB in the middle of a TD */
Sarah Sharp23e3be12009-04-29 19:05:20 -07001715 end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
Sarah Sharp2fa88da2009-11-03 22:02:24 -08001716 &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
Sarah Sharpd0e96f52009-04-27 19:58:01 -07001717 /* If the end TRB isn't in this segment, this is set to 0 */
Sarah Sharp23e3be12009-04-29 19:05:20 -07001718 end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
Sarah Sharpd0e96f52009-04-27 19:58:01 -07001719
Hans de Goedecffb9be2014-08-20 16:41:51 +03001720 if (debug)
1721 xhci_warn(xhci,
1722 "Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
1723 (unsigned long long)suspect_dma,
1724 (unsigned long long)start_dma,
1725 (unsigned long long)end_trb_dma,
1726 (unsigned long long)cur_seg->dma,
1727 (unsigned long long)end_seg_dma);
1728
Sarah Sharpd0e96f52009-04-27 19:58:01 -07001729 if (end_trb_dma > 0) {
1730 /* The end TRB is in this segment, so suspect should be here */
1731 if (start_dma <= end_trb_dma) {
1732 if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
1733 return cur_seg;
1734 } else {
1735 /* Case for one segment with
1736 * a TD wrapped around to the top
1737 */
1738 if ((suspect_dma >= start_dma &&
1739 suspect_dma <= end_seg_dma) ||
1740 (suspect_dma >= cur_seg->dma &&
1741 suspect_dma <= end_trb_dma))
1742 return cur_seg;
1743 }
Randy Dunlap326b4812010-04-19 08:53:50 -07001744 return NULL;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07001745 } else {
1746 /* Might still be somewhere in this segment */
1747 if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
1748 return cur_seg;
1749 }
1750 cur_seg = cur_seg->next;
Sarah Sharp23e3be12009-04-29 19:05:20 -07001751 start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
Sarah Sharp2fa88da2009-11-03 22:02:24 -08001752 } while (cur_seg != start_seg);
Sarah Sharpd0e96f52009-04-27 19:58:01 -07001753
Randy Dunlap326b4812010-04-19 08:53:50 -07001754 return NULL;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07001755}
1756
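/*
 * Typical use (a sketch; the transfer-event handler below is the real
 * caller): given the DMA address reported in a transfer event, find the
 * segment of the current TD that contains the completed TRB:
 *
 *	event_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
 *			      td->last_trb, event_dma, false);
 *
 * A NULL result means event_dma did not land inside this TD; passing
 * debug = true makes the walk print each segment it inspects.
 */
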
Sarah Sharpbcef3fd2009-11-11 10:28:44 -08001757static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
1758 unsigned int slot_id, unsigned int ep_index,
Sarah Sharpe9df17e2010-04-02 15:34:43 -07001759 unsigned int stream_id,
Sarah Sharpbcef3fd2009-11-11 10:28:44 -08001760 struct xhci_td *td, union xhci_trb *event_trb)
1761{
1762 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
Mathias Nymanddba5cd2014-05-08 19:26:00 +03001763 struct xhci_command *command;
1764 command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
1765 if (!command)
1766 return;
1767
Mathias Nymand0167ad2015-03-10 19:49:00 +02001768 ep->ep_state |= EP_HALTED;
Sarah Sharpe9df17e2010-04-02 15:34:43 -07001769 ep->stopped_stream = stream_id;
Sarah Sharp1624ae12010-05-06 13:40:08 -07001770
Mathias Nymanddba5cd2014-05-08 19:26:00 +03001771 xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
Mathias Nymand97b4f82014-11-27 18:19:16 +02001772 xhci_cleanup_stalled_ring(xhci, ep_index, td);
Sarah Sharp1624ae12010-05-06 13:40:08 -07001773
Sarah Sharp5e5cf6f2010-05-06 13:40:18 -07001774 ep->stopped_stream = 0;
Sarah Sharp1624ae12010-05-06 13:40:08 -07001775
Sarah Sharpbcef3fd2009-11-11 10:28:44 -08001776 xhci_ring_cmd_db(xhci);
1777}
1778
1779/* Check if an error has halted the endpoint ring. The class driver will
1780 * clean up the halt for a non-default control endpoint if we indicate a stall.
1781 * However, a babble and other errors also halt the endpoint ring, and the class
1782 * driver won't clear the halt in that case, so we need to issue a Set Transfer
1783 * Ring Dequeue Pointer command manually.
1784 */
1785static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
1786 struct xhci_ep_ctx *ep_ctx,
1787 unsigned int trb_comp_code)
1788{
1789 /* TRB completion codes that may require a manual halt cleanup */
1790 if (trb_comp_code == COMP_TX_ERR ||
1791 trb_comp_code == COMP_BABBLE ||
1792 trb_comp_code == COMP_SPLIT_ERR)
Rajesh Bhagatd4fc8bf2016-03-11 10:27:49 +05301793 /* The 0.95 spec says a babbling control endpoint
Sarah Sharpbcef3fd2009-11-11 10:28:44 -08001794 * is not halted. The 0.96 spec says it is. Some HW
1795 * claims to be 0.95 compliant, but it halts the control
1796 * endpoint anyway. Check if a babble halted the
1797 * endpoint.
1798 */
Matt Evansf5960b62011-06-01 10:22:55 +10001799 if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
1800 cpu_to_le32(EP_STATE_HALTED))
Sarah Sharpbcef3fd2009-11-11 10:28:44 -08001801 return 1;
1802
1803 return 0;
1804}
1805
Sarah Sharpb45b5062009-12-09 15:59:06 -08001806int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
1807{
1808 if (trb_comp_code >= 224 && trb_comp_code <= 255) {
1809 /* Vendor defined "informational" completion code,
1810 * treat as not-an-error.
1811 */
1812 xhci_dbg(xhci, "Vendor defined info completion code %u\n",
1813 trb_comp_code);
1814 xhci_dbg(xhci, "Treating code as success.\n");
1815 return 1;
1816 }
1817 return 0;
1818}
1819
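/*
 * Example (illustration only): a completion code of 225 falls in the
 * vendor-defined informational range above, so a caller can map it to
 * success:
 *
 *	if (xhci_is_vendor_info_code(xhci, trb_comp_code))
 *		status = 0;
 */
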
Sarah Sharpd0e96f52009-04-27 19:58:01 -07001820/*
Andiry Xu4422da62010-07-22 15:22:55 -07001821 * Finish TD processing: remove the TD from the endpoint's td list and
1822 * return 1 if the URB can be given back.
1823 */
1824static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
1825 union xhci_trb *event_trb, struct xhci_transfer_event *event,
1826 struct xhci_virt_ep *ep, int *status, bool skip)
1827{
1828 struct xhci_virt_device *xdev;
1829 struct xhci_ring *ep_ring;
1830 unsigned int slot_id;
1831 int ep_index;
1832 struct urb *urb = NULL;
1833 struct xhci_ep_ctx *ep_ctx;
1834 int ret = 0;
Andiry Xu8e51adc2010-07-22 15:23:31 -07001835 struct urb_priv *urb_priv;
Andiry Xu4422da62010-07-22 15:22:55 -07001836 u32 trb_comp_code;
1837
Matt Evans28ccd292011-03-29 13:40:46 +11001838 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
Andiry Xu4422da62010-07-22 15:22:55 -07001839 xdev = xhci->devs[slot_id];
Matt Evans28ccd292011-03-29 13:40:46 +11001840 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1841 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
Andiry Xu4422da62010-07-22 15:22:55 -07001842 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
Matt Evans28ccd292011-03-29 13:40:46 +11001843 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
Andiry Xu4422da62010-07-22 15:22:55 -07001844
1845 if (skip)
1846 goto td_cleanup;
1847
Lu Baolu40a3b772015-08-06 19:24:01 +03001848 if (trb_comp_code == COMP_STOP_INVAL ||
1849 trb_comp_code == COMP_STOP ||
1850 trb_comp_code == COMP_STOP_SHORT) {
Andiry Xu4422da62010-07-22 15:22:55 -07001851 /* The Endpoint Stop Command completion will take care of any
1852 * stopped TDs. A stopped TD may be restarted, so don't update
1853 * the ring dequeue pointer or take this TD off any lists yet.
1854 */
1855 ep->stopped_td = td;
Andiry Xu4422da62010-07-22 15:22:55 -07001856 return 0;
Mathias Nyman69defe02014-11-27 18:19:14 +02001857 }
1858 if (trb_comp_code == COMP_STALL ||
1859 xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
1860 trb_comp_code)) {
1861 /* Issue a reset endpoint command to clear the host side
1862 * halt, followed by a set dequeue command to move the
1863 * dequeue pointer past the TD.
1864 * The class driver clears the device side halt later.
1865 */
1866 xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
1867 ep_ring->stream_id, td, event_trb);
Andiry Xu4422da62010-07-22 15:22:55 -07001868 } else {
Mathias Nyman69defe02014-11-27 18:19:14 +02001869 /* Update ring dequeue pointer */
1870 while (ep_ring->dequeue != td->last_trb)
Andiry Xu3b72fca2012-03-05 17:49:32 +08001871 inc_deq(xhci, ep_ring);
Mathias Nyman69defe02014-11-27 18:19:14 +02001872 inc_deq(xhci, ep_ring);
1873 }
Andiry Xu4422da62010-07-22 15:22:55 -07001874
1875td_cleanup:
Mathias Nyman69defe02014-11-27 18:19:14 +02001876 /* Clean up the endpoint's TD list */
1877 urb = td->urb;
1878 urb_priv = urb->hcpriv;
Andiry Xu4422da62010-07-22 15:22:55 -07001879
Mathias Nymanf9c589e2016-06-21 10:58:02 +03001880 /* if a bounce buffer was used to align this td then unmap it */
1881 if (td->bounce_seg)
1882 xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);
1883
Mathias Nyman69defe02014-11-27 18:19:14 +02001884 /* Do one last check of the actual transfer length.
1885 * If the host controller said we transferred more data than the buffer
1886 * length, urb->actual_length will be a very big number (since it's
1887 * unsigned). Play it safe and say we didn't transfer anything.
1888 */
1889 if (urb->actual_length > urb->transfer_buffer_length) {
1890 xhci_warn(xhci, "URB transfer length is wrong, xHC issue? req. len = %u, act. len = %u\n",
1891 urb->transfer_buffer_length,
1892 urb->actual_length);
1893 urb->actual_length = 0;
1894 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1895 *status = -EREMOTEIO;
1896 else
1897 *status = 0;
1898 }
1899 list_del_init(&td->td_list);
1900 /* Was this TD slated to be cancelled but completed anyway? */
1901 if (!list_empty(&td->cancelled_td_list))
1902 list_del_init(&td->cancelled_td_list);
Andiry Xu4422da62010-07-22 15:22:55 -07001903
Mathias Nyman69defe02014-11-27 18:19:14 +02001904 urb_priv->td_cnt++;
1905 /* Giveback the urb when all the tds are completed */
1906 if (urb_priv->td_cnt == urb_priv->length) {
1907 ret = 1;
1908 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
1909 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
1910 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
1911 if (xhci->quirks & XHCI_AMD_PLL_FIX)
1912 usb_amd_quirk_pll_enable();
Andiry Xuc41136b2011-03-22 17:08:14 +08001913 }
1914 }
Andiry Xu4422da62010-07-22 15:22:55 -07001915 }
1916
1917 return ret;
1918}
1919
1920/*
Andiry Xu8af56be2010-07-22 15:23:03 -07001921 * Process control tds, update urb status and actual_length.
1922 */
1923static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1924 union xhci_trb *event_trb, struct xhci_transfer_event *event,
1925 struct xhci_virt_ep *ep, int *status)
1926{
1927 struct xhci_virt_device *xdev;
1928 struct xhci_ring *ep_ring;
1929 unsigned int slot_id;
1930 int ep_index;
1931 struct xhci_ep_ctx *ep_ctx;
1932 u32 trb_comp_code;
1933
Matt Evans28ccd292011-03-29 13:40:46 +11001934 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
Andiry Xu8af56be2010-07-22 15:23:03 -07001935 xdev = xhci->devs[slot_id];
Matt Evans28ccd292011-03-29 13:40:46 +11001936 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1937 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
Andiry Xu8af56be2010-07-22 15:23:03 -07001938 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
Matt Evans28ccd292011-03-29 13:40:46 +11001939 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
Andiry Xu8af56be2010-07-22 15:23:03 -07001940
Andiry Xu8af56be2010-07-22 15:23:03 -07001941 switch (trb_comp_code) {
1942 case COMP_SUCCESS:
1943 if (event_trb == ep_ring->dequeue) {
1944 xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
1945 "without IOC set??\n");
1946 *status = -ESHUTDOWN;
1947 } else if (event_trb != td->last_trb) {
1948 xhci_warn(xhci, "WARN: Success on ctrl data TRB "
1949 "without IOC set??\n");
1950 *status = -ESHUTDOWN;
1951 } else {
Andiry Xu8af56be2010-07-22 15:23:03 -07001952 *status = 0;
1953 }
1954 break;
1955 case COMP_SHORT_TX:
Andiry Xu8af56be2010-07-22 15:23:03 -07001956 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1957 *status = -EREMOTEIO;
1958 else
1959 *status = 0;
1960 break;
Lu Baolu40a3b772015-08-06 19:24:01 +03001961 case COMP_STOP_SHORT:
1962 if (event_trb == ep_ring->dequeue || event_trb == td->last_trb)
1963 xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
1964 else
1965 td->urb->actual_length =
1966 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
1967
1968 return finish_td(xhci, td, event_trb, event, ep, status, false);
Sarah Sharp3abeca92011-05-05 19:08:09 -07001969 case COMP_STOP:
Lu Baolu40a3b772015-08-06 19:24:01 +03001970 /* Did we stop at data stage? */
1971 if (event_trb != ep_ring->dequeue && event_trb != td->last_trb)
1972 td->urb->actual_length =
1973 td->urb->transfer_buffer_length -
1974 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
1975 /* fall through */
1976 case COMP_STOP_INVAL:
Sarah Sharp3abeca92011-05-05 19:08:09 -07001977 return finish_td(xhci, td, event_trb, event, ep, status, false);
Andiry Xu8af56be2010-07-22 15:23:03 -07001978 default:
1979 if (!xhci_requires_manual_halt_cleanup(xhci,
1980 ep_ctx, trb_comp_code))
1981 break;
1982 xhci_dbg(xhci, "TRB error code %u, "
1983 "halted endpoint index = %u\n",
1984 trb_comp_code, ep_index);
1985 /* else fall through */
1986 case COMP_STALL:
1987 /* Did we transfer part of the data (middle) phase? */
1988 if (event_trb != ep_ring->dequeue &&
1989 event_trb != td->last_trb)
1990 td->urb->actual_length =
Vivek Gautam1c11a172013-03-21 12:06:48 +05301991 td->urb->transfer_buffer_length -
1992 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
Mathias Nyman22ae47e2015-05-29 17:01:53 +03001993 else if (!td->urb_length_set)
Andiry Xu8af56be2010-07-22 15:23:03 -07001994 td->urb->actual_length = 0;
1995
Mathias Nyman8e71a322014-11-18 11:27:12 +02001996 return finish_td(xhci, td, event_trb, event, ep, status, false);
Andiry Xu8af56be2010-07-22 15:23:03 -07001997 }
1998 /*
1999 * Did we transfer any data, despite the errors that might have
2000 * happened? I.e. did we get past the setup stage?
2001 */
2002 if (event_trb != ep_ring->dequeue) {
2003 /* The event was for the status stage */
2004 if (event_trb == td->last_trb) {
Aleksander Morgado45ba2152015-03-06 17:14:21 +02002005 if (td->urb_length_set) {
Andiry Xu8af56be2010-07-22 15:23:03 -07002006 /* Don't overwrite a previously set error code
2007 */
2008 if ((*status == -EINPROGRESS || *status == 0) &&
2009 (td->urb->transfer_flags
2010 & URB_SHORT_NOT_OK))
2011 /* Did we already see a short data
2012 * stage? */
2013 *status = -EREMOTEIO;
2014 } else {
2015 td->urb->actual_length =
2016 td->urb->transfer_buffer_length;
2017 }
2018 } else {
Aleksander Morgado45ba2152015-03-06 17:14:21 +02002019 /*
2020 * Maybe the event was for the data stage? If so, update
2021 * already the actual_length of the URB and flag it as
2022 * set, so that it is not overwritten in the event for
2023 * the last TRB.
2024 */
2025 td->urb_length_set = true;
Sarah Sharp3abeca92011-05-05 19:08:09 -07002026 td->urb->actual_length =
2027 td->urb->transfer_buffer_length -
Vivek Gautam1c11a172013-03-21 12:06:48 +05302028 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
Sarah Sharp3abeca92011-05-05 19:08:09 -07002029 xhci_dbg(xhci, "Waiting for status "
2030 "stage event\n");
2031 return 0;
Andiry Xu8af56be2010-07-22 15:23:03 -07002032 }
2033 }
2034
2035 return finish_td(xhci, td, event_trb, event, ep, status, false);
2036}
2037
2038/*
Andiry Xu04e51902010-07-22 15:23:39 -07002039 * Process isochronous tds, update urb packet status and actual_length.
2040 */
2041static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
2042 union xhci_trb *event_trb, struct xhci_transfer_event *event,
2043 struct xhci_virt_ep *ep, int *status)
2044{
2045 struct xhci_ring *ep_ring;
2046 struct urb_priv *urb_priv;
2047 int idx;
2048 int len = 0;
Andiry Xu04e51902010-07-22 15:23:39 -07002049 union xhci_trb *cur_trb;
2050 struct xhci_segment *cur_seg;
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002051 struct usb_iso_packet_descriptor *frame;
Andiry Xu04e51902010-07-22 15:23:39 -07002052 u32 trb_comp_code;
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002053 bool skip_td = false;
Andiry Xu04e51902010-07-22 15:23:39 -07002054
Matt Evans28ccd292011-03-29 13:40:46 +11002055 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2056 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
Andiry Xu04e51902010-07-22 15:23:39 -07002057 urb_priv = td->urb->hcpriv;
2058 idx = urb_priv->td_cnt;
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002059 frame = &td->urb->iso_frame_desc[idx];
Andiry Xu04e51902010-07-22 15:23:39 -07002060
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002061 /* handle completion code */
2062 switch (trb_comp_code) {
2063 case COMP_SUCCESS:
Vivek Gautam1c11a172013-03-21 12:06:48 +05302064 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
Sarah Sharp1530bbc62012-05-08 09:22:49 -07002065 frame->status = 0;
2066 break;
2067 }
2068 if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
2069 trb_comp_code = COMP_SHORT_TX;
Lu Baolu40a3b772015-08-06 19:24:01 +03002070 /* fallthrough */
2071 case COMP_STOP_SHORT:
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002072 case COMP_SHORT_TX:
2073 frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
2074 -EREMOTEIO : 0;
2075 break;
2076 case COMP_BW_OVER:
2077 frame->status = -ECOMM;
2078 skip_td = true;
2079 break;
2080 case COMP_BUFF_OVER:
2081 case COMP_BABBLE:
2082 frame->status = -EOVERFLOW;
2083 skip_td = true;
2084 break;
Alex Hef6ba6fe2011-06-08 18:34:06 +08002085 case COMP_DEV_ERR:
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002086 case COMP_STALL:
Mathias Nymand104d012015-04-30 17:16:02 +03002087 frame->status = -EPROTO;
2088 skip_td = true;
2089 break;
Hans de Goede9c745992012-04-23 15:06:09 +02002090 case COMP_TX_ERR:
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002091 frame->status = -EPROTO;
Mathias Nymand104d012015-04-30 17:16:02 +03002092 if (event_trb != td->last_trb)
2093 return 0;
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002094 skip_td = true;
2095 break;
2096 case COMP_STOP:
2097 case COMP_STOP_INVAL:
2098 break;
2099 default:
2100 frame->status = -1;
2101 break;
Andiry Xu04e51902010-07-22 15:23:39 -07002102 }
2103
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002104 if (trb_comp_code == COMP_SUCCESS || skip_td) {
2105 frame->actual_length = frame->length;
2106 td->urb->actual_length += frame->length;
Lu Baolu40a3b772015-08-06 19:24:01 +03002107 } else if (trb_comp_code == COMP_STOP_SHORT) {
2108 frame->actual_length =
2109 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2110 td->urb->actual_length += frame->actual_length;
Andiry Xu04e51902010-07-22 15:23:39 -07002111 } else {
2112 for (cur_trb = ep_ring->dequeue,
2113 cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
2114 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
Matt Evansf5960b62011-06-01 10:22:55 +10002115 if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
Mathias Nyman3495e452016-11-11 15:13:13 +02002116 !trb_is_link(cur_trb))
Matt Evans28ccd292011-03-29 13:40:46 +11002117 len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
Andiry Xu04e51902010-07-22 15:23:39 -07002118 }
Matt Evans28ccd292011-03-29 13:40:46 +11002119 len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
Vivek Gautam1c11a172013-03-21 12:06:48 +05302120 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
Andiry Xu04e51902010-07-22 15:23:39 -07002121
2122 if (trb_comp_code != COMP_STOP_INVAL) {
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002123 frame->actual_length = len;
Andiry Xu04e51902010-07-22 15:23:39 -07002124 td->urb->actual_length += len;
2125 }
2126 }
2127
Andiry Xu04e51902010-07-22 15:23:39 -07002128 return finish_td(xhci, td, event_trb, event, ep, status, false);
2129}
2130
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002131static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
2132 struct xhci_transfer_event *event,
2133 struct xhci_virt_ep *ep, int *status)
2134{
2135 struct xhci_ring *ep_ring;
2136 struct urb_priv *urb_priv;
2137 struct usb_iso_packet_descriptor *frame;
2138 int idx;
2139
Matt Evansf6975312011-06-01 13:01:01 +10002140 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002141 urb_priv = td->urb->hcpriv;
2142 idx = urb_priv->td_cnt;
2143 frame = &td->urb->iso_frame_desc[idx];
2144
Sarah Sharpb3df3f92011-06-15 19:57:46 -07002145 /* The transfer is partly done. */
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002146 frame->status = -EXDEV;
2147
2148 /* calc actual length */
2149 frame->actual_length = 0;
2150
2151 /* Update ring dequeue pointer */
2152 while (ep_ring->dequeue != td->last_trb)
Andiry Xu3b72fca2012-03-05 17:49:32 +08002153 inc_deq(xhci, ep_ring);
2154 inc_deq(xhci, ep_ring);
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002155
2156 return finish_td(xhci, td, NULL, event, ep, status, true);
2157}
2158
Andiry Xu04e51902010-07-22 15:23:39 -07002159/*
Andiry Xu22405ed2010-07-22 15:23:08 -07002160 * Process bulk and interrupt tds, update urb status and actual_length.
2161 */
2162static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
2163 union xhci_trb *event_trb, struct xhci_transfer_event *event,
2164 struct xhci_virt_ep *ep, int *status)
2165{
2166 struct xhci_ring *ep_ring;
2167 union xhci_trb *cur_trb;
2168 struct xhci_segment *cur_seg;
2169 u32 trb_comp_code;
2170
Matt Evans28ccd292011-03-29 13:40:46 +11002171 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2172 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
Andiry Xu22405ed2010-07-22 15:23:08 -07002173
2174 switch (trb_comp_code) {
2175 case COMP_SUCCESS:
2176 /* Double check that the HW transferred everything. */
Sarah Sharp1530bbc62012-05-08 09:22:49 -07002177 if (event_trb != td->last_trb ||
Vivek Gautam1c11a172013-03-21 12:06:48 +05302178 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
Andiry Xu22405ed2010-07-22 15:23:08 -07002179 xhci_warn(xhci, "WARN Successful completion "
2180 "on short TX\n");
2181 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2182 *status = -EREMOTEIO;
2183 else
2184 *status = 0;
Sarah Sharp1530bbc62012-05-08 09:22:49 -07002185 if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
2186 trb_comp_code = COMP_SHORT_TX;
Andiry Xu22405ed2010-07-22 15:23:08 -07002187 } else {
Andiry Xu22405ed2010-07-22 15:23:08 -07002188 *status = 0;
2189 }
2190 break;
Lu Baolu40a3b772015-08-06 19:24:01 +03002191 case COMP_STOP_SHORT:
Andiry Xu22405ed2010-07-22 15:23:08 -07002192 case COMP_SHORT_TX:
2193 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2194 *status = -EREMOTEIO;
2195 else
2196 *status = 0;
2197 break;
2198 default:
2199 /* Others already handled above */
2200 break;
2201 }
Sarah Sharpf444ff22011-04-05 15:53:47 -07002202 if (trb_comp_code == COMP_SHORT_TX)
2203 xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
2204 "%d bytes untransferred\n",
2205 td->urb->ep->desc.bEndpointAddress,
2206 td->urb->transfer_buffer_length,
Vivek Gautam1c11a172013-03-21 12:06:48 +05302207 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
Lu Baolu40a3b772015-08-06 19:24:01 +03002208 /* Stopped - short packet completion */
2209 if (trb_comp_code == COMP_STOP_SHORT) {
2210 td->urb->actual_length =
2211 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2212
2213 if (td->urb->transfer_buffer_length <
2214 td->urb->actual_length) {
2215 xhci_warn(xhci, "HC gave bad length of %d bytes txed\n",
2216 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
2217 td->urb->actual_length = 0;
2218 /* status will be set by usb core for canceled urbs */
2219 }
Andiry Xu22405ed2010-07-22 15:23:08 -07002220 /* Fast path - was this the last TRB in the TD for this URB? */
Lu Baolu40a3b772015-08-06 19:24:01 +03002221 } else if (event_trb == td->last_trb) {
Vivek Gautam1c11a172013-03-21 12:06:48 +05302222 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
Andiry Xu22405ed2010-07-22 15:23:08 -07002223 td->urb->actual_length =
2224 td->urb->transfer_buffer_length -
Vivek Gautam1c11a172013-03-21 12:06:48 +05302225 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
Andiry Xu22405ed2010-07-22 15:23:08 -07002226 if (td->urb->transfer_buffer_length <
2227 td->urb->actual_length) {
2228 xhci_warn(xhci, "HC gave bad length "
2229 "of %d bytes left\n",
Vivek Gautam1c11a172013-03-21 12:06:48 +05302230 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
Andiry Xu22405ed2010-07-22 15:23:08 -07002231 td->urb->actual_length = 0;
2232 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2233 *status = -EREMOTEIO;
2234 else
2235 *status = 0;
2236 }
2237 /* Don't overwrite a previously set error code */
2238 if (*status == -EINPROGRESS) {
2239 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2240 *status = -EREMOTEIO;
2241 else
2242 *status = 0;
2243 }
2244 } else {
2245 td->urb->actual_length =
2246 td->urb->transfer_buffer_length;
2247 /* Ignore a short packet completion if the
2248 * untransferred length was zero.
2249 */
2250 if (*status == -EREMOTEIO)
2251 *status = 0;
2252 }
2253 } else {
2254 /* Slow path - walk the list, starting from the dequeue
2255 * pointer, to get the actual length transferred.
2256 */
2257 td->urb->actual_length = 0;
2258 for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
2259 cur_trb != event_trb;
2260 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
Matt Evansf5960b62011-06-01 10:22:55 +10002261 if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
Mathias Nyman3495e452016-11-11 15:13:13 +02002262 !trb_is_link(cur_trb))
Andiry Xu22405ed2010-07-22 15:23:08 -07002263 td->urb->actual_length +=
Matt Evans28ccd292011-03-29 13:40:46 +11002264 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
Andiry Xu22405ed2010-07-22 15:23:08 -07002265 }
2266 /* If the ring didn't stop on a Link or No-op TRB, add
2267 * in the actual bytes transferred from the Normal TRB
2268 */
2269 if (trb_comp_code != COMP_STOP_INVAL)
2270 td->urb->actual_length +=
Matt Evans28ccd292011-03-29 13:40:46 +11002271 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
Vivek Gautam1c11a172013-03-21 12:06:48 +05302272 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
Andiry Xu22405ed2010-07-22 15:23:08 -07002273 }
2274
2275 return finish_td(xhci, td, event_trb, event, ep, status, false);
2276}
2277
2278/*
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002279 * If this function returns an error condition, it means it got a Transfer
2280 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
2281 * At this point, the host controller is probably hosed and should be reset.
2282 */
2283static int handle_tx_event(struct xhci_hcd *xhci,
2284 struct xhci_transfer_event *event)
Felipe Balbied384bd2012-08-07 14:10:03 +03002285 __releases(&xhci->lock)
2286 __acquires(&xhci->lock)
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002287{
2288 struct xhci_virt_device *xdev;
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07002289 struct xhci_virt_ep *ep;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002290 struct xhci_ring *ep_ring;
Sarah Sharp82d10092009-08-07 14:04:52 -07002291 unsigned int slot_id;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002292 int ep_index;
Randy Dunlap326b4812010-04-19 08:53:50 -07002293 struct xhci_td *td = NULL;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002294 dma_addr_t event_dma;
2295 struct xhci_segment *event_seg;
2296 union xhci_trb *event_trb;
Randy Dunlap326b4812010-04-19 08:53:50 -07002297 struct urb *urb = NULL;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002298 int status = -EINPROGRESS;
Andiry Xu8e51adc2010-07-22 15:23:31 -07002299 struct urb_priv *urb_priv;
John Yound115b042009-07-27 12:05:15 -07002300 struct xhci_ep_ctx *ep_ctx;
Andiry Xuc2d7b492011-09-19 16:05:12 -07002301 struct list_head *tmp;
Sarah Sharp66d1eeb2009-08-27 14:35:53 -07002302 u32 trb_comp_code;
Andiry Xu4422da62010-07-22 15:22:55 -07002303 int ret = 0;
Andiry Xuc2d7b492011-09-19 16:05:12 -07002304 int td_num = 0;
Mathias Nyman3b4739b82015-10-12 11:30:12 +03002305 bool handling_skipped_tds = false;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002306
Matt Evans28ccd292011-03-29 13:40:46 +11002307 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
Sarah Sharp82d10092009-08-07 14:04:52 -07002308 xdev = xhci->devs[slot_id];
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002309 if (!xdev) {
2310 xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
Sarah Sharp9258c0b2011-12-01 14:50:30 -08002311 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
Sarah Sharpe910b442012-01-04 16:54:12 -08002312 (unsigned long long) xhci_trb_virt_to_dma(
2313 xhci->event_ring->deq_seg,
Sarah Sharp9258c0b2011-12-01 14:50:30 -08002314 xhci->event_ring->dequeue),
2315 lower_32_bits(le64_to_cpu(event->buffer)),
2316 upper_32_bits(le64_to_cpu(event->buffer)),
2317 le32_to_cpu(event->transfer_len),
2318 le32_to_cpu(event->flags));
2319 xhci_dbg(xhci, "Event ring:\n");
2320 xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002321 return -ENODEV;
2322 }
2323
2324 /* Endpoint ID is 1 based, our index is zero based */
Matt Evans28ccd292011-03-29 13:40:46 +11002325 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07002326 ep = &xdev->eps[ep_index];
Matt Evans28ccd292011-03-29 13:40:46 +11002327 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
John Yound115b042009-07-27 12:05:15 -07002328 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
Andiry Xu986a92d2010-07-22 15:23:20 -07002329 if (!ep_ring ||
Matt Evans28ccd292011-03-29 13:40:46 +11002330 (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
2331 EP_STATE_DISABLED) {
Sarah Sharpe9df17e2010-04-02 15:34:43 -07002332 xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
2333 "or incorrect stream ring\n");
Sarah Sharp9258c0b2011-12-01 14:50:30 -08002334 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
Sarah Sharpe910b442012-01-04 16:54:12 -08002335 (unsigned long long) xhci_trb_virt_to_dma(
2336 xhci->event_ring->deq_seg,
Sarah Sharp9258c0b2011-12-01 14:50:30 -08002337 xhci->event_ring->dequeue),
2338 lower_32_bits(le64_to_cpu(event->buffer)),
2339 upper_32_bits(le64_to_cpu(event->buffer)),
2340 le32_to_cpu(event->transfer_len),
2341 le32_to_cpu(event->flags));
2342 xhci_dbg(xhci, "Event ring:\n");
2343 xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002344 return -ENODEV;
2345 }
2346
Andiry Xuc2d7b492011-09-19 16:05:12 -07002347	/* Count the number of TDs on the ring if ep->skip is set */
2348 if (ep->skip) {
2349 list_for_each(tmp, &ep_ring->td_list)
2350 td_num++;
2351 }
2352
Matt Evans28ccd292011-03-29 13:40:46 +11002353 event_dma = le64_to_cpu(event->buffer);
2354 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
Andiry Xu986a92d2010-07-22 15:23:20 -07002355 /* Look for common error cases */
Sarah Sharp66d1eeb2009-08-27 14:35:53 -07002356 switch (trb_comp_code) {
Sarah Sharpb10de142009-04-27 19:58:50 -07002357 /* Skip codes that require special handling depending on
2358 * transfer type
2359 */
2360 case COMP_SUCCESS:
Vivek Gautam1c11a172013-03-21 12:06:48 +05302361 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
Sarah Sharp1530bbc62012-05-08 09:22:49 -07002362 break;
2363 if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
2364 trb_comp_code = COMP_SHORT_TX;
2365 else
Sarah Sharp8202ce22012-07-25 10:52:45 -07002366 xhci_warn_ratelimited(xhci,
2367 "WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n");
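		/* Intentional fall through: treat as a short transfer */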
Sarah Sharpb10de142009-04-27 19:58:50 -07002368 case COMP_SHORT_TX:
2369 break;
Sarah Sharpae636742009-04-29 19:02:31 -07002370 case COMP_STOP:
2371 xhci_dbg(xhci, "Stopped on Transfer TRB\n");
2372 break;
2373 case COMP_STOP_INVAL:
2374 xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
2375 break;
Lu Baolu40a3b772015-08-06 19:24:01 +03002376 case COMP_STOP_SHORT:
2377 xhci_dbg(xhci, "Stopped with short packet transfer detected\n");
2378 break;
Sarah Sharpb10de142009-04-27 19:58:50 -07002379 case COMP_STALL:
Sarah Sharp2a9227a2011-10-25 13:55:30 +02002380 xhci_dbg(xhci, "Stalled endpoint\n");
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07002381 ep->ep_state |= EP_HALTED;
Sarah Sharpb10de142009-04-27 19:58:50 -07002382 status = -EPIPE;
2383 break;
2384 case COMP_TRB_ERR:
2385 xhci_warn(xhci, "WARN: TRB error on endpoint\n");
2386 status = -EILSEQ;
2387 break;
Sarah Sharpec74e402009-11-11 10:28:36 -08002388 case COMP_SPLIT_ERR:
Sarah Sharpb10de142009-04-27 19:58:50 -07002389 case COMP_TX_ERR:
Sarah Sharp2a9227a2011-10-25 13:55:30 +02002390 xhci_dbg(xhci, "Transfer error on endpoint\n");
Sarah Sharpb10de142009-04-27 19:58:50 -07002391 status = -EPROTO;
2392 break;
Sarah Sharp4a731432009-07-27 12:04:32 -07002393 case COMP_BABBLE:
Sarah Sharp2a9227a2011-10-25 13:55:30 +02002394 xhci_dbg(xhci, "Babble error on endpoint\n");
Sarah Sharp4a731432009-07-27 12:04:32 -07002395 status = -EOVERFLOW;
2396 break;
Sarah Sharpb10de142009-04-27 19:58:50 -07002397 case COMP_DB_ERR:
2398 xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
2399 status = -ENOSR;
2400 break;
Andiry Xu986a92d2010-07-22 15:23:20 -07002401 case COMP_BW_OVER:
2402 xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
2403 break;
2404 case COMP_BUFF_OVER:
2405 xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
2406 break;
2407 case COMP_UNDERRUN:
2408 /*
2409 * When the Isoch ring is empty, the xHC will generate
2410 * a Ring Overrun Event for IN Isoch endpoint or Ring
2411 * Underrun Event for OUT Isoch endpoint.
2412 */
2413 xhci_dbg(xhci, "underrun event on endpoint\n");
2414 if (!list_empty(&ep_ring->td_list))
2415 xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
2416 "still with TDs queued?\n",
Matt Evans28ccd292011-03-29 13:40:46 +11002417 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2418 ep_index);
Andiry Xu986a92d2010-07-22 15:23:20 -07002419 goto cleanup;
2420 case COMP_OVERRUN:
2421 xhci_dbg(xhci, "overrun event on endpoint\n");
2422 if (!list_empty(&ep_ring->td_list))
2423 xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
2424 "still with TDs queued?\n",
Matt Evans28ccd292011-03-29 13:40:46 +11002425 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2426 ep_index);
Andiry Xu986a92d2010-07-22 15:23:20 -07002427 goto cleanup;
Alex Hef6ba6fe2011-06-08 18:34:06 +08002428 case COMP_DEV_ERR:
2429		xhci_warn(xhci, "WARN: detected an incompatible device\n");
2430 status = -EPROTO;
2431 break;
Andiry Xud18240d2010-07-22 15:23:25 -07002432 case COMP_MISSED_INT:
2433 /*
2434		 * When a Missed Service Error occurs, one or more isoc TDs
2435		 * may have been skipped by the xHC.
2436		 * Set the endpoint's skip flag; complete the missed TDs as
2437		 * short transfers the next time the ring is processed.
2438 */
2439 ep->skip = true;
2440 xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
2441 goto cleanup;
Mathias Nyman3b4739b82015-10-12 11:30:12 +03002442 case COMP_PING_ERR:
2443 ep->skip = true;
2444 xhci_dbg(xhci, "No Ping response error, Skip one Isoc TD\n");
2445 goto cleanup;
Sarah Sharpb10de142009-04-27 19:58:50 -07002446 default:
Sarah Sharpb45b5062009-12-09 15:59:06 -08002447 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
Sarah Sharp5ad6a522009-11-11 10:28:40 -08002448 status = 0;
2449 break;
2450 }
Mathias Nyman86cd7402015-01-09 16:06:32 +02002451 xhci_warn(xhci, "ERROR Unknown event condition %u, HC probably busted\n",
2452 trb_comp_code);
Sarah Sharpb10de142009-04-27 19:58:50 -07002453 goto cleanup;
2454 }
Andiry Xu986a92d2010-07-22 15:23:20 -07002455
Andiry Xud18240d2010-07-22 15:23:25 -07002456 do {
2457 /* This TRB should be in the TD at the head of this ring's
2458 * TD list.
2459 */
2460 if (list_empty(&ep_ring->td_list)) {
Sarah Sharpa83d6752013-03-18 10:19:51 -07002461 /*
2462 * A stopped endpoint may generate an extra completion
2463 * event if the device was suspended. Don't print
2464 * warnings.
2465 */
2466 if (!(trb_comp_code == COMP_STOP ||
2467 trb_comp_code == COMP_STOP_INVAL)) {
2468 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
2469 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2470 ep_index);
2471 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
2472 (le32_to_cpu(event->flags) &
2473 TRB_TYPE_BITMASK)>>10);
2474 xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
2475 }
Andiry Xud18240d2010-07-22 15:23:25 -07002476 if (ep->skip) {
2477 ep->skip = false;
2478 xhci_dbg(xhci, "td_list is empty while skip "
2479 "flag set. Clear skip flag.\n");
2480 }
2481 ret = 0;
2482 goto cleanup;
2483 }
Andiry Xu986a92d2010-07-22 15:23:20 -07002484
Andiry Xuc2d7b492011-09-19 16:05:12 -07002485		/* We've skipped all the TDs on the ep ring when ep->skip is set */
2486 if (ep->skip && td_num == 0) {
2487 ep->skip = false;
2488 xhci_dbg(xhci, "All tds on the ep_ring skipped. "
2489 "Clear skip flag.\n");
2490 ret = 0;
2491 goto cleanup;
2492 }
2493
Andiry Xud18240d2010-07-22 15:23:25 -07002494 td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
Andiry Xuc2d7b492011-09-19 16:05:12 -07002495 if (ep->skip)
2496 td_num--;
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002497
Andiry Xud18240d2010-07-22 15:23:25 -07002498 /* Is this a TRB in the currently executing TD? */
Hans de Goedecffb9be2014-08-20 16:41:51 +03002499 event_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
2500 td->last_trb, event_dma, false);
Alex Hee1cf4862011-06-03 15:58:25 +08002501
2502 /*
2503		 * Skip the Force Stopped Event. The event_trb(event_dma) of FSE
2504		 * is not in the current TD pointed to by ep_ring->dequeue because
2505		 * the hardware dequeue pointer still points at the previous TRB
2506		 * of the current TD. The previous TRB may be a Link TRB or the
2507		 * last TRB of the previous TD. The command completion handler
2508		 * will take care of the rest.
2509 */
Hans de Goede9a548862014-08-19 15:17:56 +03002510 if (!event_seg && (trb_comp_code == COMP_STOP ||
2511 trb_comp_code == COMP_STOP_INVAL)) {
Alex Hee1cf4862011-06-03 15:58:25 +08002512 ret = 0;
2513 goto cleanup;
2514 }
2515
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002516 if (!event_seg) {
2517 if (!ep->skip ||
2518 !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
Sarah Sharpad808332011-05-25 10:43:56 -07002519 /* Some host controllers give a spurious
2520 * successful event after a short transfer.
2521 * Ignore it.
2522 */
Mathias Nymanddba5cd2014-05-08 19:26:00 +03002523 if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
Sarah Sharpad808332011-05-25 10:43:56 -07002524 ep_ring->last_td_was_short) {
2525 ep_ring->last_td_was_short = false;
2526 ret = 0;
2527 goto cleanup;
2528 }
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002529 /* HC is busted, give up! */
2530 xhci_err(xhci,
2531 "ERROR Transfer event TRB DMA ptr not "
Hans de Goedecffb9be2014-08-20 16:41:51 +03002532 "part of current TD ep_index %d "
2533 "comp_code %u\n", ep_index,
2534 trb_comp_code);
2535 trb_in_td(xhci, ep_ring->deq_seg,
2536 ep_ring->dequeue, td->last_trb,
2537 event_dma, true);
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002538 return -ESHUTDOWN;
2539 }
2540
2541 ret = skip_isoc_td(xhci, td, event, ep, &status);
2542 goto cleanup;
2543 }
Sarah Sharpad808332011-05-25 10:43:56 -07002544 if (trb_comp_code == COMP_SHORT_TX)
2545 ep_ring->last_td_was_short = true;
2546 else
2547 ep_ring->last_td_was_short = false;
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002548
2549 if (ep->skip) {
Andiry Xud18240d2010-07-22 15:23:25 -07002550 xhci_dbg(xhci, "Found td. Clear skip flag.\n");
2551 ep->skip = false;
2552 }
Andiry Xu986a92d2010-07-22 15:23:20 -07002553
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002554 event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
2555 sizeof(*event_trb)];
2556 /*
2557 * No-op TRB should not trigger interrupts.
2558 * If event_trb is a no-op TRB, it means the
2559 * corresponding TD has been cancelled. Just ignore
2560 * the TD.
2561 */
Matt Evansf5960b62011-06-01 10:22:55 +10002562 if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
Dmitry Torokhov926008c2011-03-23 20:47:05 -07002563 xhci_dbg(xhci,
2564 "event_trb is a no-op TRB. Skip it\n");
2565 goto cleanup;
Andiry Xud18240d2010-07-22 15:23:25 -07002566 }
2567
2568 /* Now update the urb's actual_length and give back to
2569 * the core
2570 */
2571 if (usb_endpoint_xfer_control(&td->urb->ep->desc))
2572 ret = process_ctrl_td(xhci, td, event_trb, event, ep,
2573 &status);
Andiry Xu04e51902010-07-22 15:23:39 -07002574 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
2575 ret = process_isoc_td(xhci, td, event_trb, event, ep,
2576 &status);
Andiry Xud18240d2010-07-22 15:23:25 -07002577 else
2578 ret = process_bulk_intr_td(xhci, td, event_trb, event,
2579 ep, &status);
Andiry Xu4422da62010-07-22 15:22:55 -07002580
2581cleanup:
Mathias Nyman3b4739b82015-10-12 11:30:12 +03002582
2584 handling_skipped_tds = ep->skip &&
2585 trb_comp_code != COMP_MISSED_INT &&
2586 trb_comp_code != COMP_PING_ERR;
2587
Andiry Xud18240d2010-07-22 15:23:25 -07002588 /*
Mathias Nyman3b4739b82015-10-12 11:30:12 +03002589 * Do not update event ring dequeue pointer if we're in a loop
2590 * processing missed tds.
Sarah Sharp82d10092009-08-07 14:04:52 -07002591 */
Mathias Nyman3b4739b82015-10-12 11:30:12 +03002592 if (!handling_skipped_tds)
Andiry Xu3b72fca2012-03-05 17:49:32 +08002593 inc_deq(xhci, xhci->event_ring);
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002594
Andiry Xud18240d2010-07-22 15:23:25 -07002595 if (ret) {
2596 urb = td->urb;
Andiry Xu8e51adc2010-07-22 15:23:31 -07002597 urb_priv = urb->hcpriv;
Mathias Nyman8e71a322014-11-18 11:27:12 +02002598
Lin Wang4daf9df2015-01-09 16:06:31 +02002599 xhci_urb_free_priv(urb_priv);
Andiry Xud18240d2010-07-22 15:23:25 -07002600
Sarah Sharp214f76f2010-10-26 11:22:02 -07002601 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
Sarah Sharpf444ff22011-04-05 15:53:47 -07002602 if ((urb->actual_length != urb->transfer_buffer_length &&
2603 (urb->transfer_flags &
2604 URB_SHORT_NOT_OK)) ||
Sarah Sharpfd984d22011-09-02 11:05:56 -07002605 (status != 0 &&
2606 !usb_endpoint_xfer_isoc(&urb->ep->desc)))
Sarah Sharpf444ff22011-04-05 15:53:47 -07002607 xhci_dbg(xhci, "Giveback URB %p, len = %d, "
Alan Stern1949f9e2012-05-07 13:22:52 -04002608 "expected = %d, status = %d\n",
Sarah Sharpf444ff22011-04-05 15:53:47 -07002609 urb, urb->actual_length,
2610 urb->transfer_buffer_length,
2611 status);
Andiry Xud18240d2010-07-22 15:23:25 -07002612 spin_unlock(&xhci->lock);
Sarah Sharpb3df3f92011-06-15 19:57:46 -07002613 /* EHCI, UHCI, and OHCI always unconditionally set the
2614 * urb->status of an isochronous endpoint to 0.
2615 */
2616 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
2617 status = 0;
Sarah Sharp214f76f2010-10-26 11:22:02 -07002618 usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
Andiry Xud18240d2010-07-22 15:23:25 -07002619 spin_lock(&xhci->lock);
2620 }
2621
2622 /*
2623	 * If ep->skip is set, there are missed TDs on the
2624	 * endpoint ring that need to be taken care of.
2625	 * Process them as short transfers until we reach the TD
2626	 * pointed to by the event.
2627 */
Mathias Nyman3b4739b82015-10-12 11:30:12 +03002628 } while (handling_skipped_tds);
Andiry Xud18240d2010-07-22 15:23:25 -07002629
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002630 return 0;
2631}
2632
2633/*
Sarah Sharp0f2a7932009-04-27 19:57:12 -07002634 * This function handles all OS-owned events on the event ring. It may drop
2635 * xhci->lock between event processing (e.g. to pass up port status changes).
Matt Evans9dee9a22011-03-29 13:41:02 +11002636 * Returns >0 for "possibly more events to process" (caller should call again),
2637 * otherwise 0 if done. In future, <0 returns should indicate error code.
Sarah Sharp0f2a7932009-04-27 19:57:12 -07002638 */
Matt Evans9dee9a22011-03-29 13:41:02 +11002639static int xhci_handle_event(struct xhci_hcd *xhci)
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002640{
2641 union xhci_trb *event;
Sarah Sharp0f2a7932009-04-27 19:57:12 -07002642 int update_ptrs = 1;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002643 int ret;
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002644
2645 if (!xhci->event_ring || !xhci->event_ring->dequeue) {
2646 xhci->error_bitmask |= 1 << 1;
Matt Evans9dee9a22011-03-29 13:41:02 +11002647 return 0;
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002648 }
2649
2650 event = xhci->event_ring->dequeue;
2651 /* Does the HC or OS own the TRB? */
Matt Evans28ccd292011-03-29 13:40:46 +11002652 if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
2653 xhci->event_ring->cycle_state) {
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002654 xhci->error_bitmask |= 1 << 2;
Matt Evans9dee9a22011-03-29 13:41:02 +11002655 return 0;
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002656 }
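	/*
	 * Ownership check example (illustrative): with cycle_state == 1, a
	 * TRB whose TRB_CYCLE bit reads 0 has not yet been written by the
	 * HC on this pass around the ring, so there is no event to handle.
	 */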
2657
Matt Evans92a3da42011-03-29 13:40:51 +11002658 /*
2659 * Barrier between reading the TRB_CYCLE (valid) flag above and any
2660 * speculative reads of the event's flags/data below.
2661 */
2662 rmb();
Sarah Sharp0f2a7932009-04-27 19:57:12 -07002663 /* FIXME: Handle more event types. */
Matt Evans28ccd292011-03-29 13:40:46 +11002664 switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002665 case TRB_TYPE(TRB_COMPLETION):
2666 handle_cmd_completion(xhci, &event->event_cmd);
2667 break;
Sarah Sharp0f2a7932009-04-27 19:57:12 -07002668 case TRB_TYPE(TRB_PORT_STATUS):
2669 handle_port_status(xhci, event);
2670 update_ptrs = 0;
2671 break;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002672 case TRB_TYPE(TRB_TRANSFER):
2673 ret = handle_tx_event(xhci, &event->trans_event);
2674 if (ret < 0)
2675 xhci->error_bitmask |= 1 << 9;
2676 else
2677 update_ptrs = 0;
2678 break;
Sarah Sharp623bef92011-11-11 14:57:33 -08002679 case TRB_TYPE(TRB_DEV_NOTE):
2680 handle_device_notification(xhci, event);
2681 break;
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002682 default:
Matt Evans28ccd292011-03-29 13:40:46 +11002683 if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
2684 TRB_TYPE(48))
Sarah Sharp02386342010-05-24 13:25:28 -07002685 handle_vendor_event(xhci, event);
2686 else
2687 xhci->error_bitmask |= 1 << 3;
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002688 }
Sarah Sharp6f5165c2009-10-27 10:57:01 -07002689 /* Any of the above functions may drop and re-acquire the lock, so check
2690 * to make sure a watchdog timer didn't mark the host as non-responsive.
2691 */
2692 if (xhci->xhc_state & XHCI_STATE_DYING) {
2693 xhci_dbg(xhci, "xHCI host dying, returning from "
2694 "event handler.\n");
Matt Evans9dee9a22011-03-29 13:41:02 +11002695 return 0;
Sarah Sharp6f5165c2009-10-27 10:57:01 -07002696 }
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002697
Sarah Sharpc06d68b2010-07-29 22:12:49 -07002698 if (update_ptrs)
2699 /* Update SW event ring dequeue pointer */
Andiry Xu3b72fca2012-03-05 17:49:32 +08002700 inc_deq(xhci, xhci->event_ring);
Sarah Sharpc06d68b2010-07-29 22:12:49 -07002701
Matt Evans9dee9a22011-03-29 13:41:02 +11002702 /* Are there more items on the event ring? Caller will call us again to
2703 * check.
2704 */
2705 return 1;
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002706}
Sarah Sharp9032cd52010-07-29 22:12:29 -07002707
2708/*
2709 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
2710 * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
2711 * indicators of an event TRB error, but we check the status *first* to be safe.
2712 */
2713irqreturn_t xhci_irq(struct usb_hcd *hcd)
2714{
2715 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
Sarah Sharpc21599a2010-07-29 22:13:00 -07002716 u32 status;
Sarah Sharpbda53142010-07-29 22:12:38 -07002717 u64 temp_64;
Sarah Sharpc06d68b2010-07-29 22:12:49 -07002718 union xhci_trb *event_ring_deq;
2719 dma_addr_t deq;
Sarah Sharp9032cd52010-07-29 22:12:29 -07002720
2721 spin_lock(&xhci->lock);
Sarah Sharp9032cd52010-07-29 22:12:29 -07002722 /* Check if the xHC generated the interrupt, or the irq is shared */
Xenia Ragiadakoub0ba9722013-11-15 05:34:06 +02002723 status = readl(&xhci->op_regs->status);
Sarah Sharpc21599a2010-07-29 22:13:00 -07002724 if (status == 0xffffffff)
Sarah Sharp9032cd52010-07-29 22:12:29 -07002725 goto hw_died;
2726
Sarah Sharpc21599a2010-07-29 22:13:00 -07002727 if (!(status & STS_EINT)) {
Sarah Sharp9032cd52010-07-29 22:12:29 -07002728 spin_unlock(&xhci->lock);
Sarah Sharp9032cd52010-07-29 22:12:29 -07002729 return IRQ_NONE;
2730 }
Sarah Sharp27e0dd42010-07-29 22:12:43 -07002731 if (status & STS_FATAL) {
Sarah Sharp9032cd52010-07-29 22:12:29 -07002732 xhci_warn(xhci, "WARNING: Host System Error\n");
2733 xhci_halt(xhci);
2734hw_died:
Sarah Sharp9032cd52010-07-29 22:12:29 -07002735 spin_unlock(&xhci->lock);
Joe Lawrence948fa132015-04-30 17:16:04 +03002736 return IRQ_HANDLED;
Sarah Sharp9032cd52010-07-29 22:12:29 -07002737 }
2738
Sarah Sharpbda53142010-07-29 22:12:38 -07002739 /*
2740 * Clear the op reg interrupt status first,
2741 * so we can receive interrupts from other MSI-X interrupters.
2742 * Write 1 to clear the interrupt status.
2743 */
Sarah Sharp27e0dd42010-07-29 22:12:43 -07002744 status |= STS_EINT;
Xenia Ragiadakou204b7792013-11-15 05:34:07 +02002745 writel(status, &xhci->op_regs->status);
Sarah Sharpbda53142010-07-29 22:12:38 -07002746 /* FIXME when MSI-X is supported and there are multiple vectors */
2747 /* Clear the MSI-X event interrupt status */
2748
Felipe Balbicd704692012-02-29 16:46:23 +02002749 if (hcd->irq) {
Sarah Sharpc21599a2010-07-29 22:13:00 -07002750 u32 irq_pending;
2751 /* Acknowledge the PCI interrupt */
Xenia Ragiadakoub0ba9722013-11-15 05:34:06 +02002752 irq_pending = readl(&xhci->ir_set->irq_pending);
Felipe Balbi4e833c02012-03-15 16:37:08 +02002753 irq_pending |= IMAN_IP;
Xenia Ragiadakou204b7792013-11-15 05:34:07 +02002754 writel(irq_pending, &xhci->ir_set->irq_pending);
Sarah Sharpc21599a2010-07-29 22:13:00 -07002755 }
Sarah Sharpbda53142010-07-29 22:12:38 -07002756
Gabriel Krisman Bertazi27a41a82016-06-01 18:09:07 +03002757 if (xhci->xhc_state & XHCI_STATE_DYING ||
2758 xhci->xhc_state & XHCI_STATE_HALTED) {
Sarah Sharpbda53142010-07-29 22:12:38 -07002759 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
2760 "Shouldn't IRQs be disabled?\n");
Sarah Sharpc06d68b2010-07-29 22:12:49 -07002761 /* Clear the event handler busy flag (RW1C);
2762 * the event ring should be empty.
Sarah Sharpbda53142010-07-29 22:12:38 -07002763 */
Sarah Sharpf7b2e402014-01-30 13:27:49 -08002764 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
Sarah Sharp477632d2014-01-29 14:02:00 -08002765 xhci_write_64(xhci, temp_64 | ERST_EHB,
2766 &xhci->ir_set->erst_dequeue);
Sarah Sharpc06d68b2010-07-29 22:12:49 -07002767 spin_unlock(&xhci->lock);
2768
2769 return IRQ_HANDLED;
2770 }
2771
2772 event_ring_deq = xhci->event_ring->dequeue;
2773 /* FIXME this should be a delayed service routine
2774 * that clears the EHB.
2775 */
Matt Evans9dee9a22011-03-29 13:41:02 +11002776 while (xhci_handle_event(xhci) > 0) {}
Sarah Sharpc06d68b2010-07-29 22:12:49 -07002777
Sarah Sharpf7b2e402014-01-30 13:27:49 -08002778 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
Sarah Sharpc06d68b2010-07-29 22:12:49 -07002779 /* If necessary, update the HW's version of the event ring deq ptr. */
2780 if (event_ring_deq != xhci->event_ring->dequeue) {
2781 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2782 xhci->event_ring->dequeue);
2783 if (deq == 0)
2784 xhci_warn(xhci, "WARN something wrong with SW event "
2785 "ring dequeue ptr.\n");
2786 /* Update HC event ring dequeue pointer */
2787 temp_64 &= ERST_PTR_MASK;
2788 temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
2789 }
Sarah Sharpbda53142010-07-29 22:12:38 -07002790
2791 /* Clear the event handler busy flag (RW1C); event ring is empty. */
Sarah Sharpc06d68b2010-07-29 22:12:49 -07002792 temp_64 |= ERST_EHB;
Sarah Sharp477632d2014-01-29 14:02:00 -08002793 xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
Sarah Sharpc06d68b2010-07-29 22:12:49 -07002794
Sarah Sharp9032cd52010-07-29 22:12:29 -07002795 spin_unlock(&xhci->lock);
2796
2797 return IRQ_HANDLED;
2798}
2799
Alex Shi851ec162013-05-24 10:54:19 +08002800irqreturn_t xhci_msi_irq(int irq, void *hcd)
Sarah Sharp9032cd52010-07-29 22:12:29 -07002801{
Alan Stern968b8222011-11-03 12:03:38 -04002802 return xhci_irq(hcd);
Sarah Sharp9032cd52010-07-29 22:12:29 -07002803}
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002804
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002805/**** Endpoint Ring Operations ****/
2806
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002807/*
2808 * Generic function for queueing a TRB on a ring.
2809 * The caller must have checked to make sure there's room on the ring.
Sarah Sharp6cc30d82010-06-10 12:25:28 -07002810 *
2811 * @more_trbs_coming: Will you enqueue more TRBs before calling
2812 * prepare_transfer()?
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002813 */
2814static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
Andiry Xu3b72fca2012-03-05 17:49:32 +08002815 bool more_trbs_coming,
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002816 u32 field1, u32 field2, u32 field3, u32 field4)
2817{
2818 struct xhci_generic_trb *trb;
2819
2820 trb = &ring->enqueue->generic;
Matt Evans28ccd292011-03-29 13:40:46 +11002821 trb->field[0] = cpu_to_le32(field1);
2822 trb->field[1] = cpu_to_le32(field2);
2823 trb->field[2] = cpu_to_le32(field3);
2824 trb->field[3] = cpu_to_le32(field4);
Andiry Xu3b72fca2012-03-05 17:49:32 +08002825 inc_enq(xhci, ring, more_trbs_coming);
Sarah Sharp7f84eef2009-04-27 19:53:56 -07002826}
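
/*
 * Usage sketch (illustrative; mirrors how xhci_queue_bulk_tx() below fills
 * the four generic fields): a normal transfer TRB would be queued roughly as
 *
 *	queue_trb(xhci, ring, true,
 *		  lower_32_bits(dma_addr), upper_32_bits(dma_addr),
 *		  TRB_LEN(len) | TRB_INTR_TARGET(0),
 *		  TRB_TYPE(TRB_NORMAL) | ring->cycle_state);
 *
 * where dma_addr and len are hypothetical stand-ins for one TRB's worth
 * of buffer.
 */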
2827
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002828/*
2829 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
2830 * FIXME allocate segments if the ring is full.
2831 */
2832static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
Andiry Xu3b72fca2012-03-05 17:49:32 +08002833 u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002834{
Andiry Xu8dfec612012-03-05 17:49:37 +08002835 unsigned int num_trbs_needed;
2836
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002837 /* Make sure the endpoint has been added to xHC schedule */
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002838 switch (ep_state) {
2839 case EP_STATE_DISABLED:
2840 /*
2841 * USB core changed config/interfaces without notifying us,
2842 * or hardware is reporting the wrong state.
2843 */
2844 xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
2845 return -ENOENT;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002846 case EP_STATE_ERROR:
Sarah Sharpc92bcfa2009-07-27 12:05:21 -07002847 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002848 /* FIXME event handling code for error needs to clear it */
2849 /* XXX not sure if this should be -ENOENT or not */
2850 return -EINVAL;
Sarah Sharpc92bcfa2009-07-27 12:05:21 -07002851 case EP_STATE_HALTED:
2852 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
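		/* Intentional fall through: queue the URB anyway */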
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002853 case EP_STATE_STOPPED:
2854 case EP_STATE_RUNNING:
2855 break;
2856 default:
2857 xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
2858 /*
2859 * FIXME issue Configure Endpoint command to try to get the HC
2860 * back into a known state.
2861 */
2862 return -EINVAL;
2863 }
Andiry Xu8dfec612012-03-05 17:49:37 +08002864
2865 while (1) {
Sarah Sharp3d4b81e2014-01-31 11:52:57 -08002866 if (room_on_ring(xhci, ep_ring, num_trbs))
2867 break;
Andiry Xu8dfec612012-03-05 17:49:37 +08002868
2869 if (ep_ring == xhci->cmd_ring) {
2870			xhci_err(xhci, "Command ring expansion is not supported\n");
2871 return -ENOMEM;
2872 }
2873
Xenia Ragiadakou68ffb012013-08-14 06:33:56 +03002874 xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
2875 "ERROR no room on ep ring, try ring expansion");
Andiry Xu8dfec612012-03-05 17:49:37 +08002876 num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
2877 if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
2878 mem_flags)) {
2879 xhci_err(xhci, "Ring expansion failed\n");
2880 return -ENOMEM;
2881 }
Peter Senna Tschudin261fa122012-09-12 19:03:17 +02002882 }
John Youn6c12db92010-05-10 15:33:00 -07002883
Mathias Nymand0c77d82016-06-21 10:58:07 +03002884 while (trb_is_link(ep_ring->enqueue)) {
2885 /* If we're not dealing with 0.95 hardware or isoc rings
2886 * on AMD 0.96 host, clear the chain bit.
2887 */
2888 if (!xhci_link_trb_quirk(xhci) &&
2889 !(ep_ring->type == TYPE_ISOC &&
2890 (xhci->quirks & XHCI_AMD_0x96_HOST)))
2891 ep_ring->enqueue->link.control &=
2892 cpu_to_le32(~TRB_CHAIN);
2893 else
2894 ep_ring->enqueue->link.control |=
2895 cpu_to_le32(TRB_CHAIN);
John Youn6c12db92010-05-10 15:33:00 -07002896
Mathias Nymand0c77d82016-06-21 10:58:07 +03002897 wmb();
2898 ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
John Youn6c12db92010-05-10 15:33:00 -07002899
Mathias Nymand0c77d82016-06-21 10:58:07 +03002900 /* Toggle the cycle bit after the last ring segment. */
2901 if (link_trb_toggles_cycle(ep_ring->enqueue))
2902 ep_ring->cycle_state ^= 1;
John Youn6c12db92010-05-10 15:33:00 -07002903
Mathias Nymand0c77d82016-06-21 10:58:07 +03002904 ep_ring->enq_seg = ep_ring->enq_seg->next;
2905 ep_ring->enqueue = ep_ring->enq_seg->trbs;
John Youn6c12db92010-05-10 15:33:00 -07002906 }
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002907 return 0;
2908}
2909
Sarah Sharp23e3be12009-04-29 19:05:20 -07002910static int prepare_transfer(struct xhci_hcd *xhci,
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002911 struct xhci_virt_device *xdev,
2912 unsigned int ep_index,
Sarah Sharpe9df17e2010-04-02 15:34:43 -07002913 unsigned int stream_id,
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002914 unsigned int num_trbs,
2915 struct urb *urb,
Andiry Xu8e51adc2010-07-22 15:23:31 -07002916 unsigned int td_index,
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002917 gfp_t mem_flags)
2918{
2919 int ret;
Andiry Xu8e51adc2010-07-22 15:23:31 -07002920 struct urb_priv *urb_priv;
2921 struct xhci_td *td;
Sarah Sharpe9df17e2010-04-02 15:34:43 -07002922 struct xhci_ring *ep_ring;
John Yound115b042009-07-27 12:05:15 -07002923 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
Sarah Sharpe9df17e2010-04-02 15:34:43 -07002924
2925 ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
2926 if (!ep_ring) {
2927 xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
2928 stream_id);
2929 return -EINVAL;
2930 }
2931
2932 ret = prepare_ring(xhci, ep_ring,
Matt Evans28ccd292011-03-29 13:40:46 +11002933 le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
Andiry Xu3b72fca2012-03-05 17:49:32 +08002934 num_trbs, mem_flags);
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002935 if (ret)
2936 return ret;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002937
Andiry Xu8e51adc2010-07-22 15:23:31 -07002938 urb_priv = urb->hcpriv;
2939 td = urb_priv->td[td_index];
2940
2941 INIT_LIST_HEAD(&td->td_list);
2942 INIT_LIST_HEAD(&td->cancelled_td_list);
2943
2944 if (td_index == 0) {
Sarah Sharp214f76f2010-10-26 11:22:02 -07002945 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
Sarah Sharpd13565c2011-07-22 14:34:34 -07002946 if (unlikely(ret))
Andiry Xu8e51adc2010-07-22 15:23:31 -07002947 return ret;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002948 }
2949
Andiry Xu8e51adc2010-07-22 15:23:31 -07002950 td->urb = urb;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002951 /* Add this TD to the tail of the endpoint ring's TD list */
Andiry Xu8e51adc2010-07-22 15:23:31 -07002952 list_add_tail(&td->td_list, &ep_ring->td_list);
2953 td->start_seg = ep_ring->enq_seg;
2954 td->first_trb = ep_ring->enqueue;
2955
2956 urb_priv->td[td_index] = td;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07002957
2958 return 0;
2959}
2960
Alexandr Ivanovd2510342016-04-22 13:17:09 +03002961static unsigned int count_trbs(u64 addr, u64 len)
Sarah Sharp8a96c052009-04-27 19:59:19 -07002962{
Alexandr Ivanovd2510342016-04-22 13:17:09 +03002963 unsigned int num_trbs;
Sarah Sharp8a96c052009-04-27 19:59:19 -07002964
Alexandr Ivanovd2510342016-04-22 13:17:09 +03002965 num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
2966 TRB_MAX_BUFF_SIZE);
2967 if (num_trbs == 0)
2968 num_trbs++;
Sarah Sharp8a96c052009-04-27 19:59:19 -07002969
Sarah Sharp8a96c052009-04-27 19:59:19 -07002970 return num_trbs;
2971}
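
/*
 * Worked example (illustrative): with TRB_MAX_BUFF_SIZE = 64KB, a buffer of
 * len 0x300 at a DMA address whose low 16 bits are 0xff00 crosses a 64KB
 * boundary, so
 *
 *	DIV_ROUND_UP(0x300 + 0xff00, 0x10000) = 2 TRBs,
 *
 * while the same buffer starting on a 64KB boundary needs only one.
 */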
2972
Alexandr Ivanovd2510342016-04-22 13:17:09 +03002973static inline unsigned int count_trbs_needed(struct urb *urb)
Sarah Sharp8a96c052009-04-27 19:59:19 -07002974{
Alexandr Ivanovd2510342016-04-22 13:17:09 +03002975 return count_trbs(urb->transfer_dma, urb->transfer_buffer_length);
2976}
2977
2978static unsigned int count_sg_trbs_needed(struct urb *urb)
2979{
2980 struct scatterlist *sg;
2981 unsigned int i, len, full_len, num_trbs = 0;
2982
2983 full_len = urb->transfer_buffer_length;
2984
2985 for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
2986 len = sg_dma_len(sg);
2987 num_trbs += count_trbs(sg_dma_address(sg), len);
2988 len = min_t(unsigned int, len, full_len);
2989 full_len -= len;
2990 if (full_len == 0)
2991 break;
2992 }
2993
2994 return num_trbs;
2995}
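
/*
 * Illustrative example: an urb with two mapped sg entries of 0x8000 bytes
 * each, both starting on a 64KB boundary, needs one TRB per entry, so
 * count_sg_trbs_needed() returns 2; an entry that crosses a 64KB boundary
 * contributes one extra TRB.
 */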
2996
2997static unsigned int count_isoc_trbs_needed(struct urb *urb, int i)
2998{
2999 u64 addr, len;
3000
3001 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
3002 len = urb->iso_frame_desc[i].length;
3003
3004 return count_trbs(addr, len);
3005}
3006
3007static void check_trb_math(struct urb *urb, int running_total)
3008{
3009 if (unlikely(running_total != urb->transfer_buffer_length))
Paul Zimmermana2490182011-02-12 14:06:44 -08003010 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
Sarah Sharp8a96c052009-04-27 19:59:19 -07003011 "queued %#x (%d), asked for %#x (%d)\n",
3012 __func__,
3013 urb->ep->desc.bEndpointAddress,
3014 running_total, running_total,
3015 urb->transfer_buffer_length,
3016 urb->transfer_buffer_length);
3017}
3018
Sarah Sharp23e3be12009-04-29 19:05:20 -07003019static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
Sarah Sharpe9df17e2010-04-02 15:34:43 -07003020 unsigned int ep_index, unsigned int stream_id, int start_cycle,
Andiry Xue1eab2e2011-01-04 16:30:39 -08003021 struct xhci_generic_trb *start_trb)
Sarah Sharp8a96c052009-04-27 19:59:19 -07003022{
Sarah Sharp8a96c052009-04-27 19:59:19 -07003023 /*
3024 * Pass all the TRBs to the hardware at once and make sure this write
3025 * isn't reordered.
3026 */
3027 wmb();
Andiry Xu50f7b522010-12-20 15:09:34 +08003028 if (start_cycle)
Matt Evans28ccd292011-03-29 13:40:46 +11003029 start_trb->field[3] |= cpu_to_le32(start_cycle);
Andiry Xu50f7b522010-12-20 15:09:34 +08003030 else
Matt Evans28ccd292011-03-29 13:40:46 +11003031 start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
Andiry Xube88fe42010-10-14 07:22:57 -07003032 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
Sarah Sharp8a96c052009-04-27 19:59:19 -07003033}
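
/*
 * Note on the cycle bit handoff: the queueing functions below write the
 * first TRB of a TD with its cycle bit inverted, so the HC ignores the TD
 * while it is being built; the write above flips that bit to start_cycle,
 * handing the whole TD to the hardware in one step.
 */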
3034
Alexandr Ivanov78140152016-04-22 13:17:11 +03003035static void check_interval(struct xhci_hcd *xhci, struct urb *urb,
3036 struct xhci_ep_ctx *ep_ctx)
Sarah Sharp624defa2009-09-02 12:14:28 -07003037{
Sarah Sharp624defa2009-09-02 12:14:28 -07003038 int xhci_interval;
3039 int ep_interval;
3040
Matt Evans28ccd292011-03-29 13:40:46 +11003041 xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
Sarah Sharp624defa2009-09-02 12:14:28 -07003042 ep_interval = urb->interval;
Alexandr Ivanov78140152016-04-22 13:17:11 +03003043
Sarah Sharp624defa2009-09-02 12:14:28 -07003044 /* Convert to microframes */
3045 if (urb->dev->speed == USB_SPEED_LOW ||
3046 urb->dev->speed == USB_SPEED_FULL)
3047 ep_interval *= 8;
Alexandr Ivanov78140152016-04-22 13:17:11 +03003048
Sarah Sharp624defa2009-09-02 12:14:28 -07003049 /* FIXME change this to a warning and a suggestion to use the new API
3050 * to set the polling interval (once the API is added).
3051 */
3052 if (xhci_interval != ep_interval) {
Dmitry Kasatkin0730d522013-08-27 17:47:35 +03003053 dev_dbg_ratelimited(&urb->dev->dev,
3054 "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
3055 ep_interval, ep_interval == 1 ? "" : "s",
3056 xhci_interval, xhci_interval == 1 ? "" : "s");
Sarah Sharp624defa2009-09-02 12:14:28 -07003057 urb->interval = xhci_interval;
3058 /* Convert back to frames for LS/FS devices */
3059 if (urb->dev->speed == USB_SPEED_LOW ||
3060 urb->dev->speed == USB_SPEED_FULL)
3061 urb->interval /= 8;
3062 }
Alexandr Ivanov78140152016-04-22 13:17:11 +03003063}
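
/*
 * Worked example (illustrative, with an assumed endpoint context value): a
 * full-speed interrupt endpoint with urb->interval = 4 frames is 4 * 8 = 32
 * microframes. If ep_ctx encodes, say, 16 microframes, check_interval()
 * adopts the xHC value and converts back to 16 / 8 = 2 frames for the
 * FS device.
 */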
3064
3065/*
3066 * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt
3067 * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD
3068 * (comprised of sg list entries) can take several service intervals to
3069 * transmit.
3070 */
3071int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3072 struct urb *urb, int slot_id, unsigned int ep_index)
3073{
3074 struct xhci_ep_ctx *ep_ctx;
3075
3076 ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index);
3077 check_interval(xhci, urb, ep_ctx);
3078
Dan Carpenter3fc82062012-03-28 10:30:26 +03003079 return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
Sarah Sharp624defa2009-09-02 12:14:28 -07003080}
3081
Sarah Sharp04dd9502009-11-11 10:28:30 -08003082/*
Sarah Sharp4525c0a2012-10-25 15:56:40 -07003083 * For xHCI 1.0 host controllers, TD size is the number of max packet sized
3084 * packets remaining in the TD (*not* including this TRB).
Sarah Sharp4da6e6f2011-04-01 14:01:30 -07003085 *
3086 * Total TD packet count = total_packet_count =
Sarah Sharp4525c0a2012-10-25 15:56:40 -07003087 * DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
Sarah Sharp4da6e6f2011-04-01 14:01:30 -07003088 *
3089 * Packets transferred up to and including this TRB = packets_transferred =
3090 * rounddown(total bytes transferred including this TRB / wMaxPacketSize)
3091 *
3092 * TD size = total_packet_count - packets_transferred
3093 *
Mathias Nymanc840d6c2015-10-09 13:30:08 +03003094 * For xHCI 0.96 and older, TD size field should be the remaining bytes
3095 * including this TRB, right shifted by 10
3096 *
3097 * For all hosts it must fit in bits 21:17, so it can't be bigger than 31.
3098 * This is taken care of in the TRB_TD_SIZE() macro
3099 *
Sarah Sharp4525c0a2012-10-25 15:56:40 -07003100 * The last TRB in a TD must have the TD size set to zero.
Sarah Sharp4da6e6f2011-04-01 14:01:30 -07003101 */
Mathias Nymanc840d6c2015-10-09 13:30:08 +03003102static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
3103 int trb_buff_len, unsigned int td_total_len,
Mathias Nyman124c3932016-06-21 10:57:59 +03003104 struct urb *urb, bool more_trbs_coming)
Sarah Sharp4da6e6f2011-04-01 14:01:30 -07003105{
Mathias Nymanc840d6c2015-10-09 13:30:08 +03003106 u32 maxp, total_packet_count;
3107
Chunfeng Yun0cbd4b32015-11-24 13:09:55 +02003108 /* MTK xHCI is mostly 0.97 but contains some features from 1.0 */
3109 if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
Mathias Nymanc840d6c2015-10-09 13:30:08 +03003110 return ((td_total_len - transferred) >> 10);
3111
Sarah Sharp48df4a62011-08-12 10:23:01 -07003112 /* One TRB with a zero-length data packet. */
Mathias Nyman124c3932016-06-21 10:57:59 +03003113 if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
Mathias Nymanc840d6c2015-10-09 13:30:08 +03003114 trb_buff_len == td_total_len)
Sarah Sharp48df4a62011-08-12 10:23:01 -07003115 return 0;
3116
Chunfeng Yun0cbd4b32015-11-24 13:09:55 +02003117 /* for MTK xHCI, TD size doesn't include this TRB */
3118 if (xhci->quirks & XHCI_MTK_HOST)
3119 trb_buff_len = 0;
3120
3121 maxp = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
3122 total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
3123
Mathias Nymanc840d6c2015-10-09 13:30:08 +03003124 /* Queueing functions don't count the current TRB into transferred */
3125 return (total_packet_count - ((transferred + trb_buff_len) / maxp));
Sarah Sharp4da6e6f2011-04-01 14:01:30 -07003126}
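
/*
 * Worked example (illustrative): a 3000 byte TD on an endpoint with
 * wMaxPacketSize = 512, with 1024 bytes already queued and a 512 byte TRB
 * being filled in:
 *
 *	total_packet_count = DIV_ROUND_UP(3000, 512) = 6
 *	packets transferred = (1024 + 512) / 512 = 3
 *	TD size (xHCI 1.0+) = 6 - 3 = 3
 *
 * A 0.96 host would instead get (3000 - 1024) >> 10 = 1.
 */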
3127
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003128
Mathias Nyman474ed232016-06-21 10:58:01 +03003129static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003130 u32 *trb_buff_len, struct xhci_segment *seg)
Mathias Nyman474ed232016-06-21 10:58:01 +03003131{
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003132 struct device *dev = xhci_to_hcd(xhci)->self.controller;
Mathias Nyman474ed232016-06-21 10:58:01 +03003133 unsigned int unalign;
3134 unsigned int max_pkt;
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003135 u32 new_buff_len;
Mathias Nyman474ed232016-06-21 10:58:01 +03003136
3137 max_pkt = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
3138 unalign = (enqd_len + *trb_buff_len) % max_pkt;
3139
3140 /* we got lucky, last normal TRB data on segment is packet aligned */
3141 if (unalign == 0)
3142 return 0;
3143
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003144 xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n",
3145 unalign, *trb_buff_len);
3146
Mathias Nyman474ed232016-06-21 10:58:01 +03003147	/* is the last normal TRB alignable by splitting it */
3148 if (*trb_buff_len > unalign) {
3149 *trb_buff_len -= unalign;
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003150 xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len);
Mathias Nyman474ed232016-06-21 10:58:01 +03003151 return 0;
3152 }
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003153
3154 /*
3155	 * We want enqd_len + trb_buff_len to sum up to a number which is
3156	 * divisible by the endpoint's wMaxPacketSize. IOW:
3157 * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
3158 */
3159 new_buff_len = max_pkt - (enqd_len % max_pkt);
3160
3161 if (new_buff_len > (urb->transfer_buffer_length - enqd_len))
3162 new_buff_len = (urb->transfer_buffer_length - enqd_len);
3163
3164	/* create an at most max_pkt sized bounce buffer pointed to by last trb */
3165 if (usb_urb_dir_out(urb)) {
3166 sg_pcopy_to_buffer(urb->sg, urb->num_mapped_sgs,
3167 seg->bounce_buf, new_buff_len, enqd_len);
3168 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
3169 max_pkt, DMA_TO_DEVICE);
3170 } else {
3171 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
3172 max_pkt, DMA_FROM_DEVICE);
3173 }
3174
3175 if (dma_mapping_error(dev, seg->bounce_dma)) {
3176 /* try without aligning. Some host controllers survive */
3177 xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n");
3178 return 0;
3179 }
3180 *trb_buff_len = new_buff_len;
3181 seg->bounce_len = new_buff_len;
3182 seg->bounce_offs = enqd_len;
3183
3184 xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len);
3185
Mathias Nyman474ed232016-06-21 10:58:01 +03003186 return 1;
3187}
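
/*
 * Worked example (illustrative): with max_pkt = 512, enqd_len = 1000 and a
 * last-TRB length of 600, unalign = (1000 + 600) % 512 = 64. Since 600 > 64
 * the TRB is simply shortened to 536 bytes, making 1000 + 536 = 1536 a
 * multiple of 512; only when the last TRB cannot be split this way does the
 * bounce buffer path above kick in.
 */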
3188
Sarah Sharpb10de142009-04-27 19:58:50 -07003189/* This is very similar to what ehci-q.c qtd_fill() does */
Sarah Sharp23e3be12009-04-29 19:05:20 -07003190int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
Sarah Sharpb10de142009-04-27 19:58:50 -07003191 struct urb *urb, int slot_id, unsigned int ep_index)
3192{
Mathias Nyman5a5a0b12016-06-21 10:57:57 +03003193 struct xhci_ring *ring;
Andiry Xu8e51adc2010-07-22 15:23:31 -07003194 struct urb_priv *urb_priv;
Sarah Sharpb10de142009-04-27 19:58:50 -07003195 struct xhci_td *td;
Sarah Sharpb10de142009-04-27 19:58:50 -07003196 struct xhci_generic_trb *start_trb;
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003197 struct scatterlist *sg = NULL;
Mathias Nyman5a83f042016-06-21 10:57:58 +03003198 bool more_trbs_coming = true;
3199 bool need_zero_pkt = false;
Mathias Nyman86065c22016-06-21 10:58:00 +03003200 bool first_trb = true;
3201 unsigned int num_trbs;
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003202 unsigned int start_cycle, num_sgs = 0;
Mathias Nyman86065c22016-06-21 10:58:00 +03003203 unsigned int enqd_len, block_len, trb_buff_len, full_len;
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003204 int sent_len, ret;
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003205 u32 field, length_field, remainder;
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003206 u64 addr, send_addr;
Sarah Sharpb10de142009-04-27 19:58:50 -07003207
Mathias Nyman5a5a0b12016-06-21 10:57:57 +03003208 ring = xhci_urb_to_transfer_ring(xhci, urb);
3209 if (!ring)
Sarah Sharpe9df17e2010-04-02 15:34:43 -07003210 return -EINVAL;
Sarah Sharpb10de142009-04-27 19:58:50 -07003211
Mathias Nyman86065c22016-06-21 10:58:00 +03003212 full_len = urb->transfer_buffer_length;
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003213 /* If we have scatter/gather list, we use it. */
3214 if (urb->num_sgs) {
3215 num_sgs = urb->num_mapped_sgs;
3216 sg = urb->sg;
Mathias Nyman86065c22016-06-21 10:58:00 +03003217 addr = (u64) sg_dma_address(sg);
3218 block_len = sg_dma_len(sg);
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003219 num_trbs = count_sg_trbs_needed(urb);
Mathias Nyman86065c22016-06-21 10:58:00 +03003220 } else {
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003221 num_trbs = count_trbs_needed(urb);
Mathias Nyman86065c22016-06-21 10:58:00 +03003222 addr = (u64) urb->transfer_dma;
3223 block_len = full_len;
3224 }
Sarah Sharpe9df17e2010-04-02 15:34:43 -07003225 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3226 ep_index, urb->stream_id,
Andiry Xu3b72fca2012-03-05 17:49:32 +08003227 num_trbs, urb, 0, mem_flags);
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003228 if (unlikely(ret < 0))
Sarah Sharpb10de142009-04-27 19:58:50 -07003229 return ret;
3230
Andiry Xu8e51adc2010-07-22 15:23:31 -07003231 urb_priv = urb->hcpriv;
Reyad Attiyat4758dcd2015-08-06 19:23:58 +03003232
3233 /* Deal with URB_ZERO_PACKET - need one more td/trb */
Mathias Nyman5a83f042016-06-21 10:57:58 +03003234 if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->length > 1)
3235 need_zero_pkt = true;
Reyad Attiyat4758dcd2015-08-06 19:23:58 +03003236
Andiry Xu8e51adc2010-07-22 15:23:31 -07003237 td = urb_priv->td[0];
3238
Sarah Sharpb10de142009-04-27 19:58:50 -07003239 /*
3240 * Don't give the first TRB to the hardware (by toggling the cycle bit)
3241 * until we've finished creating all the other TRBs. The ring's cycle
3242 * state may change as we enqueue the other TRBs, so save it too.
3243 */
Mathias Nyman5a5a0b12016-06-21 10:57:57 +03003244 start_trb = &ring->enqueue->generic;
3245 start_cycle = ring->cycle_state;
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003246 send_addr = addr;
Sarah Sharpb10de142009-04-27 19:58:50 -07003247
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003248 /* Queue the TRBs, even if they are zero-length */
Alban Browaeys0d2daad2016-08-16 10:18:04 +03003249 for (enqd_len = 0; first_trb || enqd_len < full_len;
3250 enqd_len += trb_buff_len) {
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003251 field = TRB_TYPE(TRB_NORMAL);
3252
Mathias Nyman86065c22016-06-21 10:58:00 +03003253 /* TRB buffer should not cross 64KB boundaries */
3254 trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
3255 trb_buff_len = min_t(unsigned int, trb_buff_len, block_len);
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003256
Mathias Nyman86065c22016-06-21 10:58:00 +03003257 if (enqd_len + trb_buff_len > full_len)
3258 trb_buff_len = full_len - enqd_len;
Sarah Sharpb10de142009-04-27 19:58:50 -07003259
3260 /* Don't change the cycle bit of the first TRB until later */
Mathias Nyman86065c22016-06-21 10:58:00 +03003261 if (first_trb) {
3262 first_trb = false;
Andiry Xu50f7b522010-12-20 15:09:34 +08003263 if (start_cycle == 0)
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003264 field |= TRB_CYCLE;
Andiry Xu50f7b522010-12-20 15:09:34 +08003265 } else
Mathias Nyman5a5a0b12016-06-21 10:57:57 +03003266 field |= ring->cycle_state;
Sarah Sharpb10de142009-04-27 19:58:50 -07003267
3268 /* Chain all the TRBs together; clear the chain bit in the last
3269 * TRB to indicate it's the last TRB in the chain.
3270 */
Mathias Nyman86065c22016-06-21 10:58:00 +03003271 if (enqd_len + trb_buff_len < full_len) {
Sarah Sharpb10de142009-04-27 19:58:50 -07003272 field |= TRB_CHAIN;
Mathias Nyman2d98ef42016-06-21 10:58:04 +03003273 if (trb_is_link(ring->enqueue + 1)) {
Mathias Nyman474ed232016-06-21 10:58:01 +03003274 if (xhci_align_td(xhci, urb, enqd_len,
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003275 &trb_buff_len,
3276 ring->enq_seg)) {
3277 send_addr = ring->enq_seg->bounce_dma;
3278 /* assuming TD won't span 2 segs */
3279 td->bounce_seg = ring->enq_seg;
3280 }
Mathias Nyman474ed232016-06-21 10:58:01 +03003281 }
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003282 }
3283 if (enqd_len + trb_buff_len >= full_len) {
3284 field &= ~TRB_CHAIN;
Sarah Sharpb10de142009-04-27 19:58:50 -07003285 field |= TRB_IOC;
Mathias Nyman124c3932016-06-21 10:57:59 +03003286 more_trbs_coming = false;
Mathias Nyman5a83f042016-06-21 10:57:58 +03003287 td->last_trb = ring->enqueue;
Sarah Sharpb10de142009-04-27 19:58:50 -07003288 }
Sarah Sharpaf8b9e62011-03-23 16:26:26 -07003289
3290 /* Only set interrupt on short packet for IN endpoints */
3291 if (usb_urb_dir_in(urb))
3292 field |= TRB_ISP;
3293
Sarah Sharp4da6e6f2011-04-01 14:01:30 -07003294 /* Set the TRB length, TD size, and interrupter fields. */
Mathias Nyman86065c22016-06-21 10:58:00 +03003295 remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
3296 full_len, urb, more_trbs_coming);
3297
Sarah Sharpf9dc68f2009-07-27 12:03:07 -07003298 length_field = TRB_LEN(trb_buff_len) |
Mathias Nymanc840d6c2015-10-09 13:30:08 +03003299 TRB_TD_SIZE(remainder) |
Sarah Sharpf9dc68f2009-07-27 12:03:07 -07003300 TRB_INTR_TARGET(0);
Sarah Sharp4da6e6f2011-04-01 14:01:30 -07003301
Mathias Nyman124c3932016-06-21 10:57:59 +03003302 queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt,
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003303 lower_32_bits(send_addr),
3304 upper_32_bits(send_addr),
Sarah Sharpf9dc68f2009-07-27 12:03:07 -07003305 length_field,
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003306 field);
3307
Sarah Sharpb10de142009-04-27 19:58:50 -07003308 addr += trb_buff_len;
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003309 sent_len = trb_buff_len;
Sarah Sharpb10de142009-04-27 19:58:50 -07003310
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003311 while (sg && sent_len >= block_len) {
Mathias Nyman86065c22016-06-21 10:58:00 +03003312 /* New sg entry */
3313 --num_sgs;
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003314 sent_len -= block_len;
Mathias Nyman86065c22016-06-21 10:58:00 +03003315 if (num_sgs != 0) {
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003316 sg = sg_next(sg);
Mathias Nyman86065c22016-06-21 10:58:00 +03003317 block_len = sg_dma_len(sg);
3318 addr = (u64) sg_dma_address(sg);
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003319 addr += sent_len;
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003320 }
3321 }
Mathias Nymanf9c589e2016-06-21 10:58:02 +03003322 block_len -= sent_len;
3323 send_addr = addr;
Alexandr Ivanovd2510342016-04-22 13:17:09 +03003324 }
3325
Mathias Nyman5a83f042016-06-21 10:57:58 +03003326 if (need_zero_pkt) {
3327 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3328 ep_index, urb->stream_id,
3329 1, urb, 1, mem_flags);
3330 urb_priv->td[1]->last_trb = ring->enqueue;
3331 field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
3332 queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
3333 }
3334
Mathias Nyman86065c22016-06-21 10:58:00 +03003335 check_trb_math(urb, enqd_len);
Sarah Sharpe9df17e2010-04-02 15:34:43 -07003336 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
Andiry Xue1eab2e2011-01-04 16:30:39 -08003337 start_cycle, start_trb);
Sarah Sharpb10de142009-04-27 19:58:50 -07003338 return 0;
3339}
3340
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003341/* Caller must have locked xhci->lock */
Sarah Sharp23e3be12009-04-29 19:05:20 -07003342int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003343 struct urb *urb, int slot_id, unsigned int ep_index)
3344{
3345 struct xhci_ring *ep_ring;
3346 int num_trbs;
3347 int ret;
3348 struct usb_ctrlrequest *setup;
3349 struct xhci_generic_trb *start_trb;
3350 int start_cycle;
Mathias Nymanc840d6c2015-10-09 13:30:08 +03003351 u32 field, length_field, remainder;
Andiry Xu8e51adc2010-07-22 15:23:31 -07003352 struct urb_priv *urb_priv;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003353 struct xhci_td *td;
3354
Sarah Sharpe9df17e2010-04-02 15:34:43 -07003355 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3356 if (!ep_ring)
3357 return -EINVAL;
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003358
3359 /*
3360 * Need to copy setup packet into setup TRB, so we can't use the setup
3361 * DMA address.
3362 */
3363 if (!urb->setup_packet)
3364 return -EINVAL;
3365
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003366 /* 1 TRB for setup, 1 for status */
3367 num_trbs = 2;
3368 /*
3369 * Don't need to check if we need additional event data and normal TRBs,
3370 * since data in control transfers will never get bigger than 16MB
3371 * XXX: can we get a buffer that crosses 64KB boundaries?
3372 */
3373 if (urb->transfer_buffer_length > 0)
3374 num_trbs++;
Sarah Sharpe9df17e2010-04-02 15:34:43 -07003375 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3376 ep_index, urb->stream_id,
Andiry Xu3b72fca2012-03-05 17:49:32 +08003377 num_trbs, urb, 0, mem_flags);
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003378 if (ret < 0)
3379 return ret;
3380
Andiry Xu8e51adc2010-07-22 15:23:31 -07003381 urb_priv = urb->hcpriv;
3382 td = urb_priv->td[0];
3383
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003384 /*
3385 * Don't give the first TRB to the hardware (by toggling the cycle bit)
3386 * until we've finished creating all the other TRBs. The ring's cycle
3387 * state may change as we enqueue the other TRBs, so save it too.
3388 */
3389 start_trb = &ep_ring->enqueue->generic;
3390 start_cycle = ep_ring->cycle_state;
3391
3392 /* Queue setup TRB - see section 6.4.1.2.1 */
3393 /* FIXME better way to translate setup_packet into two u32 fields? */
3394 setup = (struct usb_ctrlrequest *) urb->setup_packet;
Andiry Xu50f7b522010-12-20 15:09:34 +08003395 field = 0;
3396 field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
3397 if (start_cycle == 0)
3398 field |= 0x1;
Andiry Xub83cdc82011-05-05 18:13:56 +08003399
Mathias Nymandca77942015-09-21 17:46:16 +03003400 /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
Chunfeng Yun0cbd4b32015-11-24 13:09:55 +02003401 if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) {
Andiry Xub83cdc82011-05-05 18:13:56 +08003402 if (urb->transfer_buffer_length > 0) {
3403 if (setup->bRequestType & USB_DIR_IN)
3404 field |= TRB_TX_TYPE(TRB_DATA_IN);
3405 else
3406 field |= TRB_TX_TYPE(TRB_DATA_OUT);
3407 }
3408 }
3409
Andiry Xu3b72fca2012-03-05 17:49:32 +08003410 queue_trb(xhci, ep_ring, true,
Matt Evans28ccd292011-03-29 13:40:46 +11003411 setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
3412 le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
3413 TRB_LEN(8) | TRB_INTR_TARGET(0),
3414 /* Immediate data in pointer */
3415 field);
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003416
3417 /* If there's data, queue data TRBs */
Sarah Sharpaf8b9e62011-03-23 16:26:26 -07003418 /* Only set interrupt on short packet for IN endpoints */
3419 if (usb_urb_dir_in(urb))
3420 field = TRB_ISP | TRB_TYPE(TRB_DATA);
3421 else
3422 field = TRB_TYPE(TRB_DATA);
3423
Mathias Nymanc840d6c2015-10-09 13:30:08 +03003424 remainder = xhci_td_remainder(xhci, 0,
3425 urb->transfer_buffer_length,
3426 urb->transfer_buffer_length,
3427 urb, 1);
3428
Sarah Sharpf9dc68f2009-07-27 12:03:07 -07003429 length_field = TRB_LEN(urb->transfer_buffer_length) |
Mathias Nymanc840d6c2015-10-09 13:30:08 +03003430 TRB_TD_SIZE(remainder) |
Sarah Sharpf9dc68f2009-07-27 12:03:07 -07003431 TRB_INTR_TARGET(0);
Mathias Nymanc840d6c2015-10-09 13:30:08 +03003432
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003433 if (urb->transfer_buffer_length > 0) {
3434 if (setup->bRequestType & USB_DIR_IN)
3435 field |= TRB_DIR_IN;
Andiry Xu3b72fca2012-03-05 17:49:32 +08003436 queue_trb(xhci, ep_ring, true,
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003437 lower_32_bits(urb->transfer_dma),
3438 upper_32_bits(urb->transfer_dma),
Sarah Sharpf9dc68f2009-07-27 12:03:07 -07003439 length_field,
Sarah Sharpaf8b9e62011-03-23 16:26:26 -07003440 field | ep_ring->cycle_state);
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003441 }
3442
3443 /* Save the DMA address of the last TRB in the TD */
3444 td->last_trb = ep_ring->enqueue;
3445
3446 /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
3447 /* If the device sent data, the status stage is an OUT transfer */
3448 if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
3449 field = 0;
3450 else
3451 field = TRB_DIR_IN;
Andiry Xu3b72fca2012-03-05 17:49:32 +08003452 queue_trb(xhci, ep_ring, false,
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003453 0,
3454 0,
3455 TRB_INTR_TARGET(0),
3456 /* Event on completion */
3457 field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
3458
Sarah Sharpe9df17e2010-04-02 15:34:43 -07003459 giveback_first_trb(xhci, slot_id, ep_index, 0,
Andiry Xue1eab2e2011-01-04 16:30:39 -08003460 start_cycle, start_trb);
Sarah Sharpd0e96f52009-04-27 19:58:01 -07003461 return 0;
3462}
3463
Sarah Sharp5cd43e32011-04-08 09:37:29 -07003464/*
3465 * The transfer burst count field of the isochronous TRB defines the number of
3466 * bursts that are required to move all packets in this TD. Only SuperSpeed
3467 * devices can burst up to bMaxBurst number of packets per service interval.
3468 * This field is zero based, meaning a value of zero in the field means one
3469 * burst. Basically, for everything but SuperSpeed devices, this field will be
3470 * zero. Only xHCI 1.0 host controllers support this field.
3471 */
3472static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
Sarah Sharp5cd43e32011-04-08 09:37:29 -07003473 struct urb *urb, unsigned int total_packet_count)
3474{
3475 unsigned int max_burst;
3476
Mathias Nyman09c352e2016-02-12 16:40:17 +02003477 if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER)
Sarah Sharp5cd43e32011-04-08 09:37:29 -07003478 return 0;
3479
3480 max_burst = urb->ep->ss_ep_comp.bMaxBurst;
Mathias Nyman3213b152014-06-24 17:14:41 +03003481 return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
Sarah Sharp5cd43e32011-04-08 09:37:29 -07003482}
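
/*
 * Worked example (illustrative): a SuperSpeed isoc endpoint with
 * bMaxBurst = 3 (bursts of up to 4 packets) moving 10 packets per service
 * interval needs DIV_ROUND_UP(10, 4) = 3 bursts, so the zero-based burst
 * count field is 2.
 */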

/*
 * Returns the number of packets in the last "burst" of packets.  This field is
 * valid for all speeds of devices.  USB 2.0 devices can only do one "burst",
 * so the last burst packet count is equal to the total number of packets in
 * the TD.  SuperSpeed endpoints can have up to 3 bursts.  All but the last
 * burst must contain (bMaxBurst + 1) number of packets, but the last burst
 * can contain 1 to (bMaxBurst + 1) packets.
 */
static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
                struct urb *urb, unsigned int total_packet_count)
{
        unsigned int max_burst;
        unsigned int residue;

        if (xhci->hci_version < 0x100)
                return 0;

        if (urb->dev->speed >= USB_SPEED_SUPER) {
                /* bMaxBurst is zero based: 0 means 1 packet per burst */
                max_burst = urb->ep->ss_ep_comp.bMaxBurst;
                residue = total_packet_count % (max_burst + 1);
                /* If residue is zero, the last burst contains (max_burst + 1)
                 * number of packets, but the TLBPC field is zero-based.
                 */
                if (residue == 0)
                        return max_burst;
                return residue - 1;
        }
        if (total_packet_count == 0)
                return 0;
        return total_packet_count - 1;
}
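
/*
 * Continuing the worked example above: with bMaxBurst = 3 and
 * total_packet_count = 10, residue = 10 % 4 = 2, so the last burst carries
 * two packets and the zero-based TLBPC returned is 1.  On the same xHCI 1.0
 * host, a USB 2.0 device moving 10 packets would get 10 - 1 = 9.
 */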

/*
 * Calculates the Frame ID field of the isochronous TRB, which identifies the
 * target frame that the Interval associated with this Isochronous Transfer
 * Descriptor will start on.  Refer to 4.11.2.5 in the 1.1 spec.
 *
 * Returns the actual frame id on success, or a negative value on error.
 */
static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
                struct urb *urb, int index)
{
        int start_frame, ist, ret = 0;
        int start_frame_id, end_frame_id, current_frame_id;

        if (urb->dev->speed == USB_SPEED_LOW ||
                        urb->dev->speed == USB_SPEED_FULL)
                start_frame = urb->start_frame + index * urb->interval;
        else
                start_frame = (urb->start_frame + index * urb->interval) >> 3;

        /* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
         *
         * If bit [3] of IST is cleared to '0', software can add a TRB no
         * later than IST[2:0] Microframes before that TRB is scheduled to
         * be executed.
         * If bit [3] of IST is set to '1', software can add a TRB no later
         * than IST[2:0] Frames before that TRB is scheduled to be executed.
         */
        ist = HCS_IST(xhci->hcs_params2) & 0x7;
        if (HCS_IST(xhci->hcs_params2) & (1 << 3))
                ist <<= 3;
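
        /*
         * Example decode (illustrative values): HCSPARAMS2 reporting
         * IST = 0xA has bit [3] set and IST[2:0] = 2, so ist becomes
         * 2 << 3 = 16 microframes, i.e. TRBs must be queued at least two
         * frames ahead of their execution time.
         */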

        /* Software shall not schedule an Isoch TD with a Frame ID value that
         * is less than the Start Frame ID or greater than the End Frame ID,
         * where:
         *
         * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
         * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
         *
         * Both the End Frame ID and Start Frame ID values are calculated
         * in microframes.  When software determines the valid Frame ID value,
         * the End Frame ID value should be rounded down to the nearest Frame
         * boundary, and the Start Frame ID value should be rounded up to the
         * nearest Frame boundary.
         */
        current_frame_id = readl(&xhci->run_regs->microframe_index);
        start_frame_id = roundup(current_frame_id + ist + 1, 8);
        end_frame_id = rounddown(current_frame_id + 895 * 8, 8);

        start_frame &= 0x7ff;
        start_frame_id = (start_frame_id >> 3) & 0x7ff;
        end_frame_id = (end_frame_id >> 3) & 0x7ff;

        xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
                 __func__, index, readl(&xhci->run_regs->microframe_index),
                 start_frame_id, end_frame_id, start_frame);

        if (start_frame_id < end_frame_id) {
                if (start_frame > end_frame_id ||
                                start_frame < start_frame_id)
                        ret = -EINVAL;
        } else if (start_frame_id > end_frame_id) {
                if ((start_frame > end_frame_id &&
                                start_frame < start_frame_id))
                        ret = -EINVAL;
        } else {
                ret = -EINVAL;
        }

        if (index == 0) {
                if (ret == -EINVAL || start_frame == start_frame_id) {
                        start_frame = start_frame_id + 1;
                        if (urb->dev->speed == USB_SPEED_LOW ||
                                        urb->dev->speed == USB_SPEED_FULL)
                                urb->start_frame = start_frame;
                        else
                                urb->start_frame = start_frame << 3;
                        ret = 0;
                }
        }

        if (ret) {
                xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
                                start_frame, current_frame_id, index,
                                start_frame_id, end_frame_id);
                xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
                return ret;
        }

        return start_frame;
}
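
/*
 * Worked example for the window check above (illustrative values): with
 * MFINDEX = 256 microframes and ist = 16, start_frame_id becomes
 * roundup(256 + 16 + 1, 8) >> 3 = 35 and end_frame_id becomes
 * rounddown(256 + 895 * 8, 8) >> 3 = 927, so Frame IDs in [35, 927] are
 * accepted; the other two branches handle the modulo-2048 wraparound.
 */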

/* This is for isoc transfer */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                struct urb *urb, int slot_id, unsigned int ep_index)
{
        struct xhci_ring *ep_ring;
        struct urb_priv *urb_priv;
        struct xhci_td *td;
        int num_tds, trbs_per_td;
        struct xhci_generic_trb *start_trb;
        bool first_trb;
        int start_cycle;
        u32 field, length_field;
        int running_total, trb_buff_len, td_len, td_remain_len, ret;
        u64 start_addr, addr;
        int i, j;
        bool more_trbs_coming;
        struct xhci_virt_ep *xep;
        int frame_id;

        xep = &xhci->devs[slot_id]->eps[ep_index];
        ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;

        num_tds = urb->number_of_packets;
        if (num_tds < 1) {
                xhci_dbg(xhci, "Isoc URB with zero packets?\n");
                return -EINVAL;
        }
        start_addr = (u64) urb->transfer_dma;
        start_trb = &ep_ring->enqueue->generic;
        start_cycle = ep_ring->cycle_state;

        urb_priv = urb->hcpriv;
        /* Queue the TRBs for each TD, even if they are zero-length */
        for (i = 0; i < num_tds; i++) {
                unsigned int total_pkt_count, max_pkt;
                unsigned int burst_count, last_burst_pkt_count;
                u32 sia_frame_id;

                first_trb = true;
                running_total = 0;
                addr = start_addr + urb->iso_frame_desc[i].offset;
                td_len = urb->iso_frame_desc[i].length;
                td_remain_len = td_len;
                max_pkt = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
                total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);

                /* A zero-length transfer still involves at least one packet. */
                if (total_pkt_count == 0)
                        total_pkt_count++;
                burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count);
                last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci,
                                urb, total_pkt_count);

                trbs_per_td = count_isoc_trbs_needed(urb, i);

                ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
                                urb->stream_id, trbs_per_td, urb, i, mem_flags);
                if (ret < 0) {
                        if (i == 0)
                                return ret;
                        goto cleanup;
                }
                td = urb_priv->td[i];

                /* use SIA as default, if frame id is used overwrite it */
                sia_frame_id = TRB_SIA;
                if (!(urb->transfer_flags & URB_ISO_ASAP) &&
                    HCC_CFC(xhci->hcc_params)) {
                        frame_id = xhci_get_isoc_frame_id(xhci, urb, i);
                        if (frame_id >= 0)
                                sia_frame_id = TRB_FRAME_ID(frame_id);
                }
                /*
                 * Set isoc specific data for the first TRB in a TD.
                 * Prevent HW from getting the TRBs by keeping the cycle state
                 * inverted in the first TD's isoc TRB.
                 */
                field = TRB_TYPE(TRB_ISOC) |
                        TRB_TLBPC(last_burst_pkt_count) |
                        sia_frame_id |
                        (i ? ep_ring->cycle_state : !start_cycle);

                /* xhci 1.1 with ETE uses TD_Size field for TBC, old is Rsvdz */
                if (!xep->use_extended_tbc)
                        field |= TRB_TBC(burst_count);

                /* fill the rest of the TRB fields, and remaining normal TRBs */
                for (j = 0; j < trbs_per_td; j++) {
                        u32 remainder = 0;

                        /* only first TRB is isoc, overwrite otherwise */
                        if (!first_trb)
                                field = TRB_TYPE(TRB_NORMAL) |
                                        ep_ring->cycle_state;

                        /* Only set interrupt on short packet for IN EPs */
                        if (usb_urb_dir_in(urb))
                                field |= TRB_ISP;

                        /* Set the chain bit for all except the last TRB */
                        if (j < trbs_per_td - 1) {
                                more_trbs_coming = true;
                                field |= TRB_CHAIN;
                        } else {
                                more_trbs_coming = false;
                                td->last_trb = ep_ring->enqueue;
                                field |= TRB_IOC;
                                /* set BEI, except for the last TD */
                                if (xhci->hci_version >= 0x100 &&
                                    !(xhci->quirks & XHCI_AVOID_BEI) &&
                                    i < num_tds - 1)
                                        field |= TRB_BEI;
                        }
                        /* Calculate TRB length */
                        trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
                        if (trb_buff_len > td_remain_len)
                                trb_buff_len = td_remain_len;

                        /* Set the TRB length, TD size, & interrupter fields. */
                        remainder = xhci_td_remainder(xhci, running_total,
                                                      trb_buff_len, td_len,
                                                      urb, more_trbs_coming);

                        length_field = TRB_LEN(trb_buff_len) |
                                TRB_INTR_TARGET(0);

                        /* xhci 1.1 with ETE uses TD Size field for TBC */
                        if (first_trb && xep->use_extended_tbc)
                                length_field |= TRB_TD_SIZE_TBC(burst_count);
                        else
                                length_field |= TRB_TD_SIZE(remainder);
                        first_trb = false;

                        queue_trb(xhci, ep_ring, more_trbs_coming,
                                lower_32_bits(addr),
                                upper_32_bits(addr),
                                length_field,
                                field);
                        running_total += trb_buff_len;

                        addr += trb_buff_len;
                        td_remain_len -= trb_buff_len;
                }

                /* Check TD length */
                if (running_total != td_len) {
                        xhci_err(xhci, "ISOC TD length mismatch\n");
                        ret = -EINVAL;
                        goto cleanup;
                }
        }

        /* store the next frame id */
        if (HCC_CFC(xhci->hcc_params))
                xep->next_frame_id = urb->start_frame + num_tds * urb->interval;

        if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
                if (xhci->quirks & XHCI_AMD_PLL_FIX)
                        usb_amd_quirk_pll_disable();
        }
        xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;

        giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
                        start_cycle, start_trb);
        return 0;
cleanup:
        /* Clean up a partially enqueued isoc transfer. */

        for (i--; i >= 0; i--)
                list_del_init(&urb_priv->td[i]->td_list);

        /* Use the first TD as a temporary variable to turn the TDs we've queued
         * into No-ops with a software-owned cycle bit.  That way the hardware
         * won't accidentally start executing bogus TDs when we partially
         * overwrite them.  td->first_trb and td->start_seg are already set.
         */
        urb_priv->td[0]->last_trb = ep_ring->enqueue;
        /* Every TRB except the first & last will have its cycle bit flipped. */
        td_to_noop(xhci, ep_ring, urb_priv->td[0], true);

        /* Reset the ring enqueue back to the first TRB and its cycle bit. */
        ep_ring->enqueue = urb_priv->td[0]->first_trb;
        ep_ring->enq_seg = urb_priv->td[0]->start_seg;
        ep_ring->cycle_state = start_cycle;
        ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
        usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
        return ret;
}

/*
 * Check the transfer ring to guarantee that there is enough room for the URB.
 * Update ISO URB start_frame and interval.
 * Update the interval as xhci_queue_intr_tx does.  Use the xhci frame_index to
 * update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or if
 * Contiguous Frame ID is not supported by the HC.
 */
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
                struct urb *urb, int slot_id, unsigned int ep_index)
{
        struct xhci_virt_device *xdev;
        struct xhci_ring *ep_ring;
        struct xhci_ep_ctx *ep_ctx;
        int start_frame;
        int num_tds, num_trbs, i;
        int ret;
        struct xhci_virt_ep *xep;
        int ist;

        xdev = xhci->devs[slot_id];
        xep = &xhci->devs[slot_id]->eps[ep_index];
        ep_ring = xdev->eps[ep_index].ring;
        ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

        num_trbs = 0;
        num_tds = urb->number_of_packets;
        for (i = 0; i < num_tds; i++)
                num_trbs += count_isoc_trbs_needed(urb, i);

        /* Check the ring to guarantee there is enough room for the whole URB.
         * Do not insert any TD of the URB into the ring if the check fails.
         */
        ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
                           num_trbs, mem_flags);
        if (ret)
                return ret;

        /*
         * Check the interval value.  This should be done before we start to
         * calculate the start frame value.
         */
        check_interval(xhci, urb, ep_ctx);

        /* Calculate the start frame and put it in urb->start_frame. */
        if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
                if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
                                EP_STATE_RUNNING) {
                        urb->start_frame = xep->next_frame_id;
                        goto skip_start_over;
                }
        }

        start_frame = readl(&xhci->run_regs->microframe_index);
        start_frame &= 0x3fff;
        /*
         * Round up to the next frame and account for the time before the TRB
         * actually gets scheduled by the hardware.
         */
        ist = HCS_IST(xhci->hcs_params2) & 0x7;
        if (HCS_IST(xhci->hcs_params2) & (1 << 3))
                ist <<= 3;
        start_frame += ist + XHCI_CFC_DELAY;
        start_frame = roundup(start_frame, 8);

        /*
         * Round up to the next ESIT (Endpoint Service Interval Time) if ESIT
         * is greater than 8 microframes.
         */
        if (urb->dev->speed == USB_SPEED_LOW ||
                        urb->dev->speed == USB_SPEED_FULL) {
                start_frame = roundup(start_frame, urb->interval << 3);
                urb->start_frame = start_frame >> 3;
        } else {
                start_frame = roundup(start_frame, urb->interval);
                urb->start_frame = start_frame;
        }

skip_start_over:
        ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;

        return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
}
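
/*
 * Illustrative arithmetic for the rounding above (assuming, purely for the
 * example, XHCI_CFC_DELAY = 7 microframes): with MFINDEX = 1000 and ist = 16,
 * start_frame = roundup(1000 + 16 + 7, 8) = 1024 microframes.  A high-speed
 * URB with urb->interval = 8 keeps 1024 as-is, while a full-speed URB with
 * urb->interval = 1 yields roundup(1024, 8) >> 3 = 128 in frames.
 */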

/****           Command Ring Operations         ****/

/* Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 * Also check that there's room reserved for commands that must not fail.
 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
 * then only check for the number of reserved spots.
 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
 * because the command event handler may want to resubmit a failed command.
 */
static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
                         u32 field1, u32 field2,
                         u32 field3, u32 field4, bool command_must_succeed)
{
        int reserved_trbs = xhci->cmd_ring_reserved_trbs;
        int ret;

        if ((xhci->xhc_state & XHCI_STATE_DYING) ||
            (xhci->xhc_state & XHCI_STATE_HALTED)) {
                xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
                return -ESHUTDOWN;
        }

        if (!command_must_succeed)
                reserved_trbs++;

        ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
                        reserved_trbs, GFP_ATOMIC);
        if (ret < 0) {
                xhci_err(xhci, "ERR: No room for command on command ring\n");
                if (command_must_succeed)
                        xhci_err(xhci, "ERR: Reserved TRB counting for "
                                        "unfailable commands failed.\n");
                return ret;
        }

        cmd->command_trb = xhci->cmd_ring->enqueue;
        list_add_tail(&cmd->cmd_list, &xhci->cmd_list);

        /* if there are no other commands queued we start the timeout timer */
        if (xhci->cmd_list.next == &cmd->cmd_list &&
            !timer_pending(&xhci->cmd_timer)) {
                xhci->current_cmd = cmd;
                mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
        }

        queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
                        field4 | xhci->cmd_ring->cycle_state);
        return 0;
}
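
/*
 * Usage sketch (illustrative only): callers allocate a struct xhci_command
 * and funnel it through one of the wrappers below, e.g.
 *
 *      cmd = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
 *      if (cmd)
 *              xhci_queue_slot_control(xhci, cmd, TRB_ENABLE_SLOT, 0);
 *
 * and then ring the command ring doorbell (xhci_ring_cmd_db()) so the HC
 * starts fetching command TRBs.
 */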

/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
                u32 trb_type, u32 slot_id)
{
        return queue_command(xhci, cmd, 0, 0, 0,
                        TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}

/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
                dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
{
        return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
                        upper_32_bits(in_ctx_ptr), 0,
                        TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
                        | (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
}

int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
                u32 field1, u32 field2, u32 field3, u32 field4)
{
        return queue_command(xhci, cmd, field1, field2, field3, field4, false);
}

/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
                u32 slot_id)
{
        return queue_command(xhci, cmd, 0, 0, 0,
                        TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
                        false);
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
                struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
                u32 slot_id, bool command_must_succeed)
{
        return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
                        upper_32_bits(in_ctx_ptr), 0,
                        TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
                        command_must_succeed);
}

/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
                dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
{
        return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
                        upper_32_bits(in_ctx_ptr), 0,
                        TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
                        command_must_succeed);
}

/*
 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
 * activity on an endpoint that is about to be suspended.
 */
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
                             int slot_id, unsigned int ep_index, int suspend)
{
        u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
        u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
        u32 type = TRB_TYPE(TRB_STOP_RING);
        u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);

        return queue_command(xhci, cmd, 0, 0, 0,
                        trb_slot_id | trb_ep_index | type | trb_suspend, false);
}

/* Set Transfer Ring Dequeue Pointer command */
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
                unsigned int slot_id, unsigned int ep_index,
                unsigned int stream_id,
                struct xhci_dequeue_state *deq_state)
{
        dma_addr_t addr;
        u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
        u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
        u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
        u32 trb_sct = 0;
        u32 type = TRB_TYPE(TRB_SET_DEQ);
        struct xhci_virt_ep *ep;
        struct xhci_command *cmd;
        int ret;

        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), new deq ptr = %p (0x%llx dma), new cycle = %u",
                deq_state->new_deq_seg,
                (unsigned long long)deq_state->new_deq_seg->dma,
                deq_state->new_deq_ptr,
                (unsigned long long)xhci_trb_virt_to_dma(
                        deq_state->new_deq_seg, deq_state->new_deq_ptr),
                deq_state->new_cycle_state);

        addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
                                    deq_state->new_deq_ptr);
        if (addr == 0) {
                xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
                xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
                          deq_state->new_deq_seg, deq_state->new_deq_ptr);
                return;
        }
        ep = &xhci->devs[slot_id]->eps[ep_index];
        if ((ep->ep_state & SET_DEQ_PENDING)) {
                xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
                xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
                return;
        }

        /* This function gets called from contexts where it cannot sleep */
        cmd = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
        if (!cmd) {
                xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr: ENOMEM\n");
                return;
        }

        ep->queued_deq_seg = deq_state->new_deq_seg;
        ep->queued_deq_ptr = deq_state->new_deq_ptr;
        if (stream_id)
                trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
        ret = queue_command(xhci, cmd,
                lower_32_bits(addr) | trb_sct | deq_state->new_cycle_state,
                upper_32_bits(addr), trb_stream_id,
                trb_slot_id | trb_ep_index | type, false);
        if (ret < 0) {
                xhci_free_command(xhci, cmd);
                return;
        }

        /* Stop the TD queueing code from ringing the doorbell until
         * this command completes.  The HC won't set the dequeue pointer
         * if the ring is running, and ringing the doorbell starts the
         * ring running.
         */
        ep->ep_state |= SET_DEQ_PENDING;
}

int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
                        int slot_id, unsigned int ep_index)
{
        u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
        u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
        u32 type = TRB_TYPE(TRB_RESET_EP);

        return queue_command(xhci, cmd, 0, 0, 0,
                        trb_slot_id | trb_ep_index | type, false);
}