// SPDX-License-Identifier: GPL-2.0
/*
 * Cadence USBSS DRD Driver - gadget side.
 *
 * Copyright (C) 2018-2019 Cadence Design Systems.
 * Copyright (C) 2017-2018 NXP
 *
 * Authors: Pawel Jez <pjez@cadence.com>,
 *          Pawel Laszczak <pawell@cadence.com>
 *          Peter Chen <peter.chen@nxp.com>
 */

/*
 * Work around 1:
 * In some situations the controller may get a stale data address in a TRB
 * through the following sequence:
 * 1. Controller reads a TRB that includes the data address
 * 2. Software updates TRBs, including the data address and the Cycle bit
 * 3. Controller reads the TRB that includes the Cycle bit
 * 4. DMA runs with the stale data address
 *
 * To fix this problem, the driver needs to make the first TRB in a TD
 * invalid. After preparing all TRBs the driver needs to check the position
 * of DMA; if DMA points to the first just-added TRB and the doorbell is 1,
 * then the driver must defer making this TRB valid. This TRB will be made
 * valid while adding the next TRB, and only if DMA is stopped or at a
 * TRBERR interrupt.
 *
 * The issue has been fixed in the DEV_VER_V3 version of the controller.
 *
 * Work around 2:
 * The controller uses on-chip buffers, shared by all OUT endpoints, for all
 * incoming packets, including ep0out. It is a FIFO buffer, so packets must
 * be handled by DMA in the correct order. If the first packet in the buffer
 * is not handled, then the following packets directed to other endpoints
 * and functions are blocked.
 * Additionally, packets directed to one endpoint can block the entire
 * on-chip buffer. In this case transfers to other endpoints are blocked
 * as well.
 *
 * To resolve this issue, after raising the descriptor missing interrupt the
 * driver prepares an internal usb_request object and uses it to arm a DMA
 * transfer.
 *
 * The problematic situation was observed when an endpoint had been enabled
 * but no usb_request was queued. The driver tries to detect such endpoints
 * and uses this workaround only for them.
 *
 * The driver uses a limited number of buffers. This number can be set by
 * the macro CDNS3_WA2_NUM_BUFFERS.
 *
 * Such a blocking situation was observed on the ACM gadget. For this
 * function the host sends an OUT data packet, but the ACM function is not
 * prepared for it. This causes the buffer placed in on-chip memory to block
 * transfers to other endpoints.
 *
 * The issue has been fixed in the DEV_VER_V2 version of the controller.
 */
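
/*
 * A minimal sketch of the Work around 1 flow, pieced together from the
 * helpers defined later in this file (illustration only; the authoritative
 * logic lives in cdns3_ep_run_transfer()):
 *
 *	togle_pcs = cdns3_wa1_update_guard(priv_ep, trb);
 *	...					(prepare remaining TRBs)
 *	if (togle_pcs)
 *		trb->control = trb->control ^ cpu_to_le32(1);
 *	cdns3_wa1_tray_restore_cycle_bit(priv_dev, priv_ep);
 */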

#include <linux/dma-mapping.h>
#include <linux/usb/gadget.h>
#include <linux/module.h>
#include <linux/dmapool.h>
#include <linux/iopoll.h>

#include "core.h"
#include "gadget-export.h"
#include "cdns3-gadget.h"
#include "cdns3-trace.h"
#include "drd.h"

static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
				   struct usb_request *request,
				   gfp_t gfp_flags);

static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
				 struct usb_request *request);

static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
					struct usb_request *request);

/**
 * cdns3_clear_register_bit - clear bit in given register.
 * @ptr: address of device controller register to be read and changed
 * @mask: bits requested to clear
 */
static void cdns3_clear_register_bit(void __iomem *ptr, u32 mask)
{
	mask = readl(ptr) & ~mask;
	writel(mask, ptr);
}

/**
 * cdns3_set_register_bit - set bit in given register.
 * @ptr: address of device controller register to be read and changed
 * @mask: bits requested to set
 */
void cdns3_set_register_bit(void __iomem *ptr, u32 mask)
{
	mask = readl(ptr) | mask;
	writel(mask, ptr);
}

/**
 * cdns3_ep_addr_to_index - converts an endpoint address to the
 * index of the endpoint object in the cdns3_device.eps[] container
 * @ep_addr: endpoint address for which the endpoint object is required
 */
u8 cdns3_ep_addr_to_index(u8 ep_addr)
{
	return (((ep_addr & 0x7F)) + ((ep_addr & USB_DIR_IN) ? 16 : 0));
}

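/*
 * Illustration only: endpoint address 0x81 (ep1in) maps to index
 * 1 + 16 = 17, while address 0x02 (ep2out) maps to index 2.
 */
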
static int cdns3_get_dma_pos(struct cdns3_device *priv_dev,
			     struct cdns3_endpoint *priv_ep)
{
	int dma_index;

	dma_index = readl(&priv_dev->regs->ep_traddr) - priv_ep->trb_pool_dma;

	return dma_index / TRB_SIZE;
}

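/*
 * Note (illustration only): ep_traddr holds the DMA address of the TRB at
 * the controller's current position for the selected endpoint, so the byte
 * offset from the pool base divided by TRB_SIZE gives the ring index.
 */
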
/**
 * cdns3_next_request - returns next request from list
 * @list: list containing requests
 *
 * Returns request or NULL if no requests in list
 */
struct usb_request *cdns3_next_request(struct list_head *list)
{
	return list_first_entry_or_null(list, struct usb_request, list);
}

/**
 * cdns3_next_align_buf - returns next buffer from list
 * @list: list containing buffers
 *
 * Returns buffer or NULL if no buffers in list
 */
static struct cdns3_aligned_buf *cdns3_next_align_buf(struct list_head *list)
{
	return list_first_entry_or_null(list, struct cdns3_aligned_buf, list);
}

/**
 * cdns3_next_priv_request - returns next request from list
 * @list: list containing requests
 *
 * Returns request or NULL if no requests in list
 */
static struct cdns3_request *cdns3_next_priv_request(struct list_head *list)
{
	return list_first_entry_or_null(list, struct cdns3_request, list);
}

/**
 * cdns3_select_ep - selects endpoint
 * @priv_dev: extended gadget object
 * @ep: endpoint address
 */
void cdns3_select_ep(struct cdns3_device *priv_dev, u32 ep)
{
	if (priv_dev->selected_ep == ep)
		return;

	priv_dev->selected_ep = ep;
	writel(ep, &priv_dev->regs->ep_sel);
}

/**
 * cdns3_get_tdl - gets current tdl for selected endpoint.
 * @priv_dev: extended gadget object
 *
 * Before calling this function the appropriate endpoint must
 * be selected by means of the cdns3_select_ep function.
 */
static int cdns3_get_tdl(struct cdns3_device *priv_dev)
{
	if (priv_dev->dev_ver < DEV_VER_V3)
		return EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));
	else
		return readl(&priv_dev->regs->ep_tdl);
}

dma_addr_t cdns3_trb_virt_to_dma(struct cdns3_endpoint *priv_ep,
				 struct cdns3_trb *trb)
{
	u32 offset = (char *)trb - (char *)priv_ep->trb_pool;

	return priv_ep->trb_pool_dma + offset;
}

static void cdns3_free_trb_pool(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	if (priv_ep->trb_pool) {
		dma_pool_free(priv_dev->eps_dma_pool,
			      priv_ep->trb_pool, priv_ep->trb_pool_dma);
		priv_ep->trb_pool = NULL;
	}
}

/**
 * cdns3_allocate_trb_pool - Allocates a TRB pool for the selected endpoint
 * @priv_ep: endpoint object
 *
 * Function will return 0 on success or -ENOMEM on allocation error
 */
int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	int ring_size = TRB_RING_SIZE;
	int num_trbs = ring_size / TRB_SIZE;
	struct cdns3_trb *link_trb;

	if (priv_ep->trb_pool && priv_ep->alloc_ring_size < ring_size)
		cdns3_free_trb_pool(priv_ep);

	if (!priv_ep->trb_pool) {
		priv_ep->trb_pool = dma_pool_alloc(priv_dev->eps_dma_pool,
						   GFP_DMA32 | GFP_ATOMIC,
						   &priv_ep->trb_pool_dma);

		if (!priv_ep->trb_pool)
			return -ENOMEM;

		priv_ep->alloc_ring_size = ring_size;
	}

	memset(priv_ep->trb_pool, 0, ring_size);

	priv_ep->num_trbs = num_trbs;

	if (!priv_ep->num)
		return 0;

	/* Initialize the last TRB as Link TRB */
	link_trb = (priv_ep->trb_pool + (priv_ep->num_trbs - 1));

	if (priv_ep->use_streams) {
		/*
		 * For stream capable endpoints the driver uses a single,
		 * correct TRB. The last TRB has a zeroed cycle bit.
		 */
		link_trb->control = 0;
	} else {
		link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma));
		link_trb->control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE);
	}
	return 0;
}

/**
 * cdns3_ep_stall_flush - Stalls and flushes selected endpoint
 * @priv_ep: endpoint object
 *
 * Endpoint must be selected before calling this function
 */
static void cdns3_ep_stall_flush(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	int val;

	trace_cdns3_halt(priv_ep, 1, 1);

	writel(EP_CMD_DFLUSH | EP_CMD_ERDY | EP_CMD_SSTALL,
	       &priv_dev->regs->ep_cmd);

	/* wait for DFLUSH cleared */
	readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
				  !(val & EP_CMD_DFLUSH), 1, 1000);
	priv_ep->flags |= EP_STALLED;
	priv_ep->flags &= ~EP_STALL_PENDING;
}

/**
 * cdns3_hw_reset_eps_config - reset endpoints configuration kept by controller.
 * @priv_dev: extended gadget object
 */
void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev)
{
	int i;

	writel(USB_CONF_CFGRST, &priv_dev->regs->usb_conf);

	cdns3_allow_enable_l1(priv_dev, 0);
	priv_dev->hw_configured_flag = 0;
	priv_dev->onchip_used_size = 0;
	priv_dev->out_mem_is_allocated = 0;
	priv_dev->wait_for_setup = 0;
	priv_dev->using_streams = 0;

	for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++)
		if (priv_dev->eps[i])
			priv_dev->eps[i]->flags &= ~EP_CONFIGURED;
}

/**
 * cdns3_ep_inc_trb - increment a trb index.
 * @index: Pointer to the TRB index to increment.
 * @cs: Cycle state
 * @trb_in_seg: number of TRBs in segment
 *
 * The index should never point to the link TRB. After incrementing,
 * if it would point to the link TRB, wrap around to the beginning and
 * toggle the cycle state bit. The link TRB is always at the last TRB entry.
 */
static void cdns3_ep_inc_trb(int *index, u8 *cs, int trb_in_seg)
{
	(*index)++;
	if (*index == (trb_in_seg - 1)) {
		*index = 0;
		*cs ^= 1;
	}
}

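/*
 * Illustration only: with trb_in_seg == 8 the index advances
 * 0, 1, ..., 6 and then wraps back to 0 (slot 7 holds the link TRB),
 * toggling the cycle state on every wrap.
 */
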
/**
 * cdns3_ep_inc_enq - increment endpoint's enqueue pointer
 * @priv_ep: The endpoint whose enqueue pointer we're incrementing
 */
static void cdns3_ep_inc_enq(struct cdns3_endpoint *priv_ep)
{
	priv_ep->free_trbs--;
	cdns3_ep_inc_trb(&priv_ep->enqueue, &priv_ep->pcs, priv_ep->num_trbs);
}

/**
 * cdns3_ep_inc_deq - increment endpoint's dequeue pointer
 * @priv_ep: The endpoint whose dequeue pointer we're incrementing
 */
static void cdns3_ep_inc_deq(struct cdns3_endpoint *priv_ep)
{
	priv_ep->free_trbs++;
	cdns3_ep_inc_trb(&priv_ep->dequeue, &priv_ep->ccs, priv_ep->num_trbs);
}

static void cdns3_move_deq_to_next_trb(struct cdns3_request *priv_req)
{
	struct cdns3_endpoint *priv_ep = priv_req->priv_ep;
	int current_trb = priv_req->start_trb;

	while (current_trb != priv_req->end_trb) {
		cdns3_ep_inc_deq(priv_ep);
		current_trb = priv_ep->dequeue;
	}

	cdns3_ep_inc_deq(priv_ep);
}

/**
 * cdns3_allow_enable_l1 - enable/disable permission to transition to L1.
 * @priv_dev: Extended gadget object
 * @enable: Enable/disable permission to transition to L1.
 *
 * If the USB_CONF_L1EN bit is set and the device receives an Extended Token
 * packet, then the controller answers with an ACK handshake.
 * If the USB_CONF_L1DS bit is set and the device receives an Extended Token
 * packet, then the controller answers with a NYET handshake.
 */
void cdns3_allow_enable_l1(struct cdns3_device *priv_dev, int enable)
{
	if (enable)
		writel(USB_CONF_L1EN, &priv_dev->regs->usb_conf);
	else
		writel(USB_CONF_L1DS, &priv_dev->regs->usb_conf);
}

enum usb_device_speed cdns3_get_speed(struct cdns3_device *priv_dev)
{
	u32 reg;

	reg = readl(&priv_dev->regs->usb_sts);

	if (DEV_SUPERSPEED(reg))
		return USB_SPEED_SUPER;
	else if (DEV_HIGHSPEED(reg))
		return USB_SPEED_HIGH;
	else if (DEV_FULLSPEED(reg))
		return USB_SPEED_FULL;
	else if (DEV_LOWSPEED(reg))
		return USB_SPEED_LOW;
	return USB_SPEED_UNKNOWN;
}

/**
 * cdns3_start_all_request - add all not-started requests to the ring
 * @priv_dev: Extended gadget object
 * @priv_ep: The endpoint for which requests will be started.
 *
 * Returns -ENOBUFS if the transfer ring does not have enough TRBs to
 * start all requests.
 */
static int cdns3_start_all_request(struct cdns3_device *priv_dev,
				   struct cdns3_endpoint *priv_ep)
{
	struct usb_request *request;
	int ret = 0;
	u8 pending_empty = list_empty(&priv_ep->pending_req_list);

	/*
	 * If the last pending transfer is INTERNAL
	 * OR streams are enabled for this endpoint,
	 * do NOT start a new transfer while the last one is still pending.
	 */
	if (!pending_empty) {
		struct cdns3_request *priv_req;

		request = cdns3_next_request(&priv_ep->pending_req_list);
		priv_req = to_cdns3_request(request);
		if ((priv_req->flags & REQUEST_INTERNAL) ||
		    (priv_ep->flags & EP_TDLCHK_EN) ||
		    priv_ep->use_streams) {
			dev_dbg(priv_dev->dev, "Blocking external request\n");
			return ret;
		}
	}

	while (!list_empty(&priv_ep->deferred_req_list)) {
		request = cdns3_next_request(&priv_ep->deferred_req_list);

		if (!priv_ep->use_streams) {
			ret = cdns3_ep_run_transfer(priv_ep, request);
		} else {
			priv_ep->stream_sg_idx = 0;
			ret = cdns3_ep_run_stream_transfer(priv_ep, request);
		}
		if (ret)
			return ret;

		list_del(&request->list);
		list_add_tail(&request->list,
			      &priv_ep->pending_req_list);
		if (request->stream_id != 0 || (priv_ep->flags & EP_TDLCHK_EN))
			break;
	}

	priv_ep->flags &= ~EP_RING_FULL;
	return ret;
}

/*
 * WA2: Set the flag for all non-ISOC OUT endpoints. If this flag is set,
 * the driver tries to detect whether the endpoint needs an additional
 * internal buffer for unblocking the on-chip FIFO buffer. This flag will
 * be cleared if DMA is armed before the first DESCMISS interrupt.
 */
#define cdns3_wa2_enable_detection(priv_dev, priv_ep, reg) do { \
	if (!priv_ep->dir && priv_ep->type != USB_ENDPOINT_XFER_ISOC) { \
		priv_ep->flags |= EP_QUIRK_EXTRA_BUF_DET; \
		(reg) |= EP_STS_EN_DESCMISEN; \
	} } while (0)

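/*
 * Usage sketch (illustration only; it mirrors the call site in
 * cdns3_wa2_check_outq_status() below):
 *
 *	ep_sts_en_reg = readl(&priv_dev->regs->ep_sts_en);
 *	cdns3_wa2_enable_detection(priv_dev, outq_ep, ep_sts_en_reg);
 *	writel(ep_sts_en_reg, &priv_dev->regs->ep_sts_en);
 */
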
static void __cdns3_descmiss_copy_data(struct usb_request *request,
				       struct usb_request *descmiss_req)
{
	int length = request->actual + descmiss_req->actual;
	struct scatterlist *s = request->sg;

	if (!s) {
		if (length <= request->length) {
			memcpy(&((u8 *)request->buf)[request->actual],
			       descmiss_req->buf,
			       descmiss_req->actual);
			request->actual = length;
		} else {
			/* This should never occur */
			request->status = -ENOMEM;
		}
	} else {
		if (length <= sg_dma_len(s)) {
			void *p = phys_to_virt(sg_dma_address(s));

			memcpy(&((u8 *)p)[request->actual],
			       descmiss_req->buf,
			       descmiss_req->actual);
			request->actual = length;
		} else {
			request->status = -ENOMEM;
		}
	}
}

/**
 * cdns3_wa2_descmiss_copy_data - copy data from internal requests to
 * the request queued by the class driver.
 * @priv_ep: extended endpoint object
 * @request: request object
 */
static void cdns3_wa2_descmiss_copy_data(struct cdns3_endpoint *priv_ep,
					 struct usb_request *request)
{
	struct usb_request *descmiss_req;
	struct cdns3_request *descmiss_priv_req;

	while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
		int chunk_end;

		descmiss_priv_req =
			cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
		descmiss_req = &descmiss_priv_req->request;

		/* driver can't touch pending request */
		if (descmiss_priv_req->flags & REQUEST_PENDING)
			break;

		chunk_end = descmiss_priv_req->flags & REQUEST_INTERNAL_CH;
		request->status = descmiss_req->status;
		__cdns3_descmiss_copy_data(request, descmiss_req);
		list_del_init(&descmiss_priv_req->list);
		kfree(descmiss_req->buf);
		cdns3_gadget_ep_free_request(&priv_ep->endpoint, descmiss_req);
		--priv_ep->wa2_counter;

		if (!chunk_end)
			break;
	}
}

static struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev,
						     struct cdns3_endpoint *priv_ep,
						     struct cdns3_request *priv_req)
{
	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN &&
	    priv_req->flags & REQUEST_INTERNAL) {
		struct usb_request *req;

		req = cdns3_next_request(&priv_ep->deferred_req_list);

		priv_ep->descmis_req = NULL;

		if (!req)
			return NULL;

		/* unmap the gadget request before copying data */
		usb_gadget_unmap_request_by_dev(priv_dev->sysdev, req,
						priv_ep->dir);

		cdns3_wa2_descmiss_copy_data(priv_ep, req);
		if (!(priv_ep->flags & EP_QUIRK_END_TRANSFER) &&
		    req->length != req->actual) {
			/* wait for next part of transfer */
			/* re-map the gadget request buffer */
			usb_gadget_map_request_by_dev(priv_dev->sysdev, req,
						      usb_endpoint_dir_in(priv_ep->endpoint.desc));
			return NULL;
		}

		if (req->status == -EINPROGRESS)
			req->status = 0;

		list_del_init(&req->list);
		cdns3_start_all_request(priv_dev, priv_ep);
		return req;
	}

	return &priv_req->request;
}

static int cdns3_wa2_gadget_ep_queue(struct cdns3_device *priv_dev,
				     struct cdns3_endpoint *priv_ep,
				     struct cdns3_request *priv_req)
{
	int deferred = 0;

	/*
	 * If a transfer was queued before DESCMISS appeared, then the driver
	 * can disable handling of the DESCMISS interrupt. The driver assumes
	 * that it can disable the special treatment for this endpoint.
	 */
	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
		u32 reg;

		cdns3_select_ep(priv_dev, priv_ep->num | priv_ep->dir);
		priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
		reg = readl(&priv_dev->regs->ep_sts_en);
		reg &= ~EP_STS_EN_DESCMISEN;
		trace_cdns3_wa2(priv_ep, "workaround disabled\n");
		writel(reg, &priv_dev->regs->ep_sts_en);
	}

	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
		u8 pending_empty = list_empty(&priv_ep->pending_req_list);
		u8 descmiss_empty = list_empty(&priv_ep->wa2_descmiss_req_list);

		/*
		 * The DESCMISS transfer has been finished, so data will be
		 * directly copied from the internally allocated usb_request
		 * objects.
		 */
		if (pending_empty && !descmiss_empty &&
		    !(priv_req->flags & REQUEST_INTERNAL)) {
			cdns3_wa2_descmiss_copy_data(priv_ep,
						     &priv_req->request);

			trace_cdns3_wa2(priv_ep, "get internal stored data");

			list_add_tail(&priv_req->request.list,
				      &priv_ep->pending_req_list);
			cdns3_gadget_giveback(priv_ep, priv_req,
					      priv_req->request.status);

			/*
			 * The driver intentionally returns a positive value
			 * as the correct value: it informs the caller that
			 * the transfer has already been finished.
			 */
			return EINPROGRESS;
		}

		/*
		 * The driver will wait for completion of the DESCMISS
		 * transfer before starting a new, non-DESCMISS transfer.
		 */
		if (!pending_empty && !descmiss_empty) {
			trace_cdns3_wa2(priv_ep, "wait for pending transfer\n");
			deferred = 1;
		}

		if (priv_req->flags & REQUEST_INTERNAL)
			list_add_tail(&priv_req->list,
				      &priv_ep->wa2_descmiss_req_list);
	}

	return deferred;
}

static void cdns3_wa2_remove_old_request(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_request *priv_req;

	while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
		u8 chain;

		priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
		chain = !!(priv_req->flags & REQUEST_INTERNAL_CH);

		trace_cdns3_wa2(priv_ep, "removes eldest request");

		kfree(priv_req->request.buf);
		cdns3_gadget_ep_free_request(&priv_ep->endpoint,
					     &priv_req->request);
		list_del_init(&priv_req->list);
		--priv_ep->wa2_counter;

		if (!chain)
			break;
	}
}

/**
 * cdns3_wa2_descmissing_packet - handles descriptor missing event.
 * @priv_ep: extended gadget object
 *
 * This function is used only for WA2. For more information see the
 * Work around 2 description.
 */
static void cdns3_wa2_descmissing_packet(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_request *priv_req;
	struct usb_request *request;
	u8 pending_empty = list_empty(&priv_ep->pending_req_list);

	/* check for pending transfer */
	if (!pending_empty) {
		trace_cdns3_wa2(priv_ep, "Ignoring Descriptor missing IRQ\n");
		return;
	}

	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
		priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
		priv_ep->flags |= EP_QUIRK_EXTRA_BUF_EN;
	}

	trace_cdns3_wa2(priv_ep, "Description Missing detected\n");

	if (priv_ep->wa2_counter >= CDNS3_WA2_NUM_BUFFERS) {
		trace_cdns3_wa2(priv_ep, "WA2 overflow\n");
		cdns3_wa2_remove_old_request(priv_ep);
	}

	request = cdns3_gadget_ep_alloc_request(&priv_ep->endpoint,
						GFP_ATOMIC);
	if (!request)
		goto err;

	priv_req = to_cdns3_request(request);
	priv_req->flags |= REQUEST_INTERNAL;

	/*
	 * If this field is still assigned, it indicates that the transfer
	 * related to that request has not been finished yet. In this case
	 * the driver simply allocates the next request and assigns the
	 * REQUEST_INTERNAL_CH flag to the previous one. The flag indicates
	 * that the current request is part of the previous one.
	 */
	if (priv_ep->descmis_req)
		priv_ep->descmis_req->flags |= REQUEST_INTERNAL_CH;

	priv_req->request.buf = kzalloc(CDNS3_DESCMIS_BUF_SIZE,
					GFP_ATOMIC);
	priv_ep->wa2_counter++;

	if (!priv_req->request.buf) {
		cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
		goto err;
	}

	priv_req->request.length = CDNS3_DESCMIS_BUF_SIZE;
	priv_ep->descmis_req = priv_req;

	__cdns3_gadget_ep_queue(&priv_ep->endpoint,
				&priv_ep->descmis_req->request,
				GFP_ATOMIC);

	return;

err:
	dev_err(priv_ep->cdns3_dev->dev,
		"Failed: No sufficient memory for DESCMIS\n");
}

static void cdns3_wa2_reset_tdl(struct cdns3_device *priv_dev)
{
	u16 tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));

	if (tdl) {
		u16 reset_val = EP_CMD_TDL_MAX + 1 - tdl;

		writel(EP_CMD_TDL_SET(reset_val) | EP_CMD_STDL,
		       &priv_dev->regs->ep_cmd);
	}
}

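/*
 * Illustration only (assuming the TDL counter wraps at EP_CMD_TDL_MAX + 1):
 * if the controller still reports tdl == 3, programming
 * EP_CMD_TDL_MAX + 1 - 3 with EP_CMD_STDL advances the counter back
 * to zero.
 */
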
static void cdns3_wa2_check_outq_status(struct cdns3_device *priv_dev)
{
	u32 ep_sts_reg;

	/* select EP0-out */
	cdns3_select_ep(priv_dev, 0);

	ep_sts_reg = readl(&priv_dev->regs->ep_sts);

	if (EP_STS_OUTQ_VAL(ep_sts_reg)) {
		u32 outq_ep_num = EP_STS_OUTQ_NO(ep_sts_reg);
		struct cdns3_endpoint *outq_ep = priv_dev->eps[outq_ep_num];

		if ((outq_ep->flags & EP_ENABLED) && !(outq_ep->use_streams) &&
		    outq_ep->type != USB_ENDPOINT_XFER_ISOC && outq_ep_num) {
			u8 pending_empty = list_empty(&outq_ep->pending_req_list);

			if ((outq_ep->flags & EP_QUIRK_EXTRA_BUF_DET) ||
			    (outq_ep->flags & EP_QUIRK_EXTRA_BUF_EN) ||
			    !pending_empty) {
			} else {
				u32 ep_sts_en_reg;
				u32 ep_cmd_reg;

				cdns3_select_ep(priv_dev, outq_ep->num |
						outq_ep->dir);
				ep_sts_en_reg = readl(&priv_dev->regs->ep_sts_en);
				ep_cmd_reg = readl(&priv_dev->regs->ep_cmd);

				outq_ep->flags |= EP_TDLCHK_EN;
				cdns3_set_register_bit(&priv_dev->regs->ep_cfg,
						       EP_CFG_TDL_CHK);

				cdns3_wa2_enable_detection(priv_dev, outq_ep,
							   ep_sts_en_reg);
				writel(ep_sts_en_reg,
				       &priv_dev->regs->ep_sts_en);
				/* reset tdl value to zero */
				cdns3_wa2_reset_tdl(priv_dev);
				/*
				 * Memory barrier - Reset tdl before ringing the
				 * doorbell.
				 */
				wmb();
				if (EP_CMD_DRDY & ep_cmd_reg) {
					trace_cdns3_wa2(outq_ep, "Enabling WA2 skipping doorbell\n");

				} else {
					trace_cdns3_wa2(outq_ep, "Enabling WA2 ringing doorbell\n");
					/*
					 * ring doorbell to generate DESCMIS irq
					 */
					writel(EP_CMD_DRDY,
					       &priv_dev->regs->ep_cmd);
				}
			}
		}
	}
}

/**
 * cdns3_gadget_giveback - call struct usb_request's ->complete callback
 * @priv_ep: The endpoint to which the request belongs
 * @priv_req: The request we're giving back
 * @status: completion code for the request
 *
 * Must be called with controller's lock held and interrupts disabled. This
 * function will unmap @req and call its ->complete() callback to notify upper
 * layers that it has completed.
 */
void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
			   struct cdns3_request *priv_req,
			   int status)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct usb_request *request = &priv_req->request;

	list_del_init(&request->list);

	if (request->status == -EINPROGRESS)
		request->status = status;

	usb_gadget_unmap_request_by_dev(priv_dev->sysdev, request,
					priv_ep->dir);

	if ((priv_req->flags & REQUEST_UNALIGNED) &&
	    priv_ep->dir == USB_DIR_OUT && !request->status) {
		/* Make DMA buffer CPU accessible */
		dma_sync_single_for_cpu(priv_dev->sysdev,
					priv_req->aligned_buf->dma,
					priv_req->aligned_buf->size,
					priv_req->aligned_buf->dir);
		memcpy(request->buf, priv_req->aligned_buf->buf,
		       request->length);
	}

	priv_req->flags &= ~(REQUEST_PENDING | REQUEST_UNALIGNED);
	/* All TRBs have finished, clear the counter */
	priv_req->finished_trb = 0;
	trace_cdns3_gadget_giveback(priv_req);

	if (priv_dev->dev_ver < DEV_VER_V2) {
		request = cdns3_wa2_gadget_giveback(priv_dev, priv_ep,
						    priv_req);
		if (!request)
			return;
	}

	if (request->complete) {
		spin_unlock(&priv_dev->lock);
		usb_gadget_giveback_request(&priv_ep->endpoint,
					    request);
		spin_lock(&priv_dev->lock);
	}

	if (request->buf == priv_dev->zlp_buf)
		cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
}

static void cdns3_wa1_restore_cycle_bit(struct cdns3_endpoint *priv_ep)
{
	/* Work around for stale data address in TRB */
	if (priv_ep->wa1_set) {
		trace_cdns3_wa1(priv_ep, "restore cycle bit");

		priv_ep->wa1_set = 0;
		priv_ep->wa1_trb_index = 0xFFFF;
		if (priv_ep->wa1_cycle_bit) {
			priv_ep->wa1_trb->control =
				priv_ep->wa1_trb->control | cpu_to_le32(0x1);
		} else {
			priv_ep->wa1_trb->control =
				priv_ep->wa1_trb->control & cpu_to_le32(~0x1);
		}
	}
}

static void cdns3_free_aligned_request_buf(struct work_struct *work)
{
	struct cdns3_device *priv_dev = container_of(work, struct cdns3_device,
						     aligned_buf_wq);
	struct cdns3_aligned_buf *buf, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&priv_dev->lock, flags);

	list_for_each_entry_safe(buf, tmp, &priv_dev->aligned_buf_list, list) {
		if (!buf->in_use) {
			list_del(&buf->list);

			/*
			 * Re-enable interrupts to free DMA capable memory.
			 * The driver can't free this memory with interrupts
			 * disabled.
			 */
			spin_unlock_irqrestore(&priv_dev->lock, flags);
			dma_free_noncoherent(priv_dev->sysdev, buf->size,
					     buf->buf, buf->dma, buf->dir);
			kfree(buf);
			spin_lock_irqsave(&priv_dev->lock, flags);
		}
	}

	spin_unlock_irqrestore(&priv_dev->lock, flags);
}

static int cdns3_prepare_aligned_request_buf(struct cdns3_request *priv_req)
{
	struct cdns3_endpoint *priv_ep = priv_req->priv_ep;
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_aligned_buf *buf;

	/* check if the buffer is aligned to 8. */
	if (!((uintptr_t)priv_req->request.buf & 0x7))
		return 0;

	buf = priv_req->aligned_buf;

	if (!buf || priv_req->request.length > buf->size) {
		buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
		if (!buf)
			return -ENOMEM;

		buf->size = priv_req->request.length;
		buf->dir = usb_endpoint_dir_in(priv_ep->endpoint.desc) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE;

		buf->buf = dma_alloc_noncoherent(priv_dev->sysdev,
						 buf->size,
						 &buf->dma,
						 buf->dir,
						 GFP_ATOMIC);
		if (!buf->buf) {
			kfree(buf);
			return -ENOMEM;
		}

		if (priv_req->aligned_buf) {
			trace_cdns3_free_aligned_request(priv_req);
			priv_req->aligned_buf->in_use = 0;
			queue_work(system_freezable_wq,
				   &priv_dev->aligned_buf_wq);
		}

		buf->in_use = 1;
		priv_req->aligned_buf = buf;

		list_add_tail(&buf->list,
			      &priv_dev->aligned_buf_list);
	}

	if (priv_ep->dir == USB_DIR_IN) {
		/* Make DMA buffer CPU accessible */
		dma_sync_single_for_cpu(priv_dev->sysdev,
					buf->dma, buf->size, buf->dir);
		memcpy(buf->buf, priv_req->request.buf,
		       priv_req->request.length);
	}

	/* Transfer DMA buffer ownership back to device */
	dma_sync_single_for_device(priv_dev->sysdev,
				   buf->dma, buf->size, buf->dir);

	priv_req->flags |= REQUEST_UNALIGNED;
	trace_cdns3_prepare_aligned_request(priv_req);

	return 0;
}

static int cdns3_wa1_update_guard(struct cdns3_endpoint *priv_ep,
				  struct cdns3_trb *trb)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	if (!priv_ep->wa1_set) {
		u32 doorbell;

		doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);

		if (doorbell) {
			priv_ep->wa1_cycle_bit = priv_ep->pcs ? TRB_CYCLE : 0;
			priv_ep->wa1_set = 1;
			priv_ep->wa1_trb = trb;
			priv_ep->wa1_trb_index = priv_ep->enqueue;
			trace_cdns3_wa1(priv_ep, "set guard");
			return 0;
		}
	}
	return 1;
}

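/*
 * Note for callers (illustration only): a return value of 0 means the guard
 * was armed (the doorbell was set) and the first TRB must not be made valid
 * yet; a return value of 1 means the cycle bit may be toggled immediately.
 * See "Work around 1" at the top of this file.
 */
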
static void cdns3_wa1_tray_restore_cycle_bit(struct cdns3_device *priv_dev,
					     struct cdns3_endpoint *priv_ep)
{
	int dma_index;
	u32 doorbell;

	doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
	dma_index = cdns3_get_dma_pos(priv_dev, priv_ep);

	if (!doorbell || dma_index != priv_ep->wa1_trb_index)
		cdns3_wa1_restore_cycle_bit(priv_ep);
}

static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
					struct usb_request *request)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_request *priv_req;
	struct cdns3_trb *trb;
	dma_addr_t trb_dma;
	int address;
	u32 control;
	u32 length;
	u32 tdl;
	unsigned int sg_idx = priv_ep->stream_sg_idx;

	priv_req = to_cdns3_request(request);
	address = priv_ep->endpoint.desc->bEndpointAddress;

	priv_ep->flags |= EP_PENDING_REQUEST;

	/* must use a buffer aligned to 8 */
	if (priv_req->flags & REQUEST_UNALIGNED)
		trb_dma = priv_req->aligned_buf->dma;
	else
		trb_dma = request->dma;

	/* For stream capable endpoints the driver uses only a single TD. */
	trb = priv_ep->trb_pool + priv_ep->enqueue;
	priv_req->start_trb = priv_ep->enqueue;
	priv_req->end_trb = priv_req->start_trb;
	priv_req->trb = trb;

	cdns3_select_ep(priv_ep->cdns3_dev, address);

	control = TRB_TYPE(TRB_NORMAL) | TRB_CYCLE |
		  TRB_STREAM_ID(priv_req->request.stream_id) | TRB_ISP;

	if (!request->num_sgs) {
		trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma));
		length = request->length;
	} else {
		trb->buffer = cpu_to_le32(TRB_BUFFER(request->sg[sg_idx].dma_address));
		length = request->sg[sg_idx].length;
	}

	tdl = DIV_ROUND_UP(length, priv_ep->endpoint.maxpacket);

	trb->length = cpu_to_le32(TRB_BURST_LEN(16) | TRB_LEN(length));

	/*
	 * For the DEV_VER_V2 controller version we have enabled
	 * USB_CONF2_EN_TDL_TRB in the DMULT configuration.
	 * This enables TDL calculation based on the TRB, hence setting
	 * TDL in the TRB.
	 */
	if (priv_dev->dev_ver >= DEV_VER_V2) {
		if (priv_dev->gadget.speed == USB_SPEED_SUPER)
			trb->length |= cpu_to_le32(TRB_TDL_SS_SIZE(tdl));
	}
	priv_req->flags |= REQUEST_PENDING;

	trb->control = cpu_to_le32(control);

	trace_cdns3_prepare_trb(priv_ep, priv_req->trb);

	/*
	 * Memory barrier - Cycle Bit must be set before trb->length and
	 * trb->buffer fields.
	 */
	wmb();

	/* always first element */
	writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma),
	       &priv_dev->regs->ep_traddr);

	if (!(priv_ep->flags & EP_STALLED)) {
		trace_cdns3_ring(priv_ep);
		/* clear TRBERR and EP_STS_DESCMIS before setting DRDY */
		writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);

		priv_ep->prime_flag = false;

		/*
		 * For controller version DEV_VER_V2 the tdl calculation
		 * is based on the TRB.
		 */

		if (priv_dev->dev_ver < DEV_VER_V2)
			writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL,
			       &priv_dev->regs->ep_cmd);
		else if (priv_dev->dev_ver > DEV_VER_V2)
			writel(tdl, &priv_dev->regs->ep_tdl);

		priv_ep->last_stream_id = priv_req->request.stream_id;
		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
		writel(EP_CMD_ERDY_SID(priv_req->request.stream_id) |
		       EP_CMD_ERDY, &priv_dev->regs->ep_cmd);

		trace_cdns3_doorbell_epx(priv_ep->name,
					 readl(&priv_dev->regs->ep_traddr));
	}

	/* WORKAROUND for transition to L0 */
	__cdns3_gadget_wakeup(priv_dev);

	return 0;
}

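/*
 * Illustration only: tdl counts max-packet-sized chunks, e.g. a 3 KiB
 * request on a 1024-byte SuperSpeed bulk endpoint gives
 * tdl = DIV_ROUND_UP(3072, 1024) = 3.
 */
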
/**
 * cdns3_ep_run_transfer - start transfer on non-default endpoint hardware
 * @priv_ep: endpoint object
 * @request: request object
 *
 * Returns zero on success or negative value on failure
 */
static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
				 struct usb_request *request)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_request *priv_req;
	struct cdns3_trb *trb;
	struct cdns3_trb *link_trb = NULL;
	dma_addr_t trb_dma;
	u32 togle_pcs = 1;
	int sg_iter = 0;
	int num_trb;
	int address;
	u32 control;
	int pcs;
	u16 total_tdl = 0;
	struct scatterlist *s = NULL;
	bool sg_supported = !!(request->num_mapped_sgs);

	if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
		num_trb = priv_ep->interval;
	else
		num_trb = sg_supported ? request->num_mapped_sgs : 1;

	if (num_trb > priv_ep->free_trbs) {
		priv_ep->flags |= EP_RING_FULL;
		return -ENOBUFS;
	}

	priv_req = to_cdns3_request(request);
	address = priv_ep->endpoint.desc->bEndpointAddress;

	priv_ep->flags |= EP_PENDING_REQUEST;

	/* must use a buffer aligned to 8 */
	if (priv_req->flags & REQUEST_UNALIGNED)
		trb_dma = priv_req->aligned_buf->dma;
	else
		trb_dma = request->dma;

	trb = priv_ep->trb_pool + priv_ep->enqueue;
	priv_req->start_trb = priv_ep->enqueue;
	priv_req->trb = trb;

	cdns3_select_ep(priv_ep->cdns3_dev, address);

	/* prepare ring */
	if ((priv_ep->enqueue + num_trb) >= (priv_ep->num_trbs - 1)) {
		int doorbell, dma_index;
		u32 ch_bit = 0;

		doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
		dma_index = cdns3_get_dma_pos(priv_dev, priv_ep);

		/* The driver can't update the LINK TRB while it is being processed. */
		if (doorbell && dma_index == priv_ep->num_trbs - 1) {
			priv_ep->flags |= EP_DEFERRED_DRDY;
			return -ENOBUFS;
		}

		/* update the Cycle bit in the Link TRB before starting DMA */
		link_trb = priv_ep->trb_pool + (priv_ep->num_trbs - 1);
		/*
		 * For a TR of size 2, enabling TRB_CHAIN for epXin causes
		 * DMA to get stuck at the LINK TRB.
		 * On the other hand, removing TRB_CHAIN for longer TRs for
		 * epXout causes DMA to get stuck after handling the LINK TRB.
		 * To eliminate this strange behaviour the driver sets the
		 * TRB_CHAIN bit only for TR size > 2.
		 */
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC ||
		    TRBS_PER_SEGMENT > 2)
			ch_bit = TRB_CHAIN;

		link_trb->control = cpu_to_le32(((priv_ep->pcs) ? TRB_CYCLE : 0) |
				    TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit);
	}

	if (priv_dev->dev_ver <= DEV_VER_V2)
		togle_pcs = cdns3_wa1_update_guard(priv_ep, trb);

	if (sg_supported)
		s = request->sg;

	/* set an incorrect Cycle bit for the first trb */
	control = priv_ep->pcs ? 0 : TRB_CYCLE;
	trb->length = 0;
	if (priv_dev->dev_ver >= DEV_VER_V2) {
		u16 td_size;

		td_size = DIV_ROUND_UP(request->length,
				       priv_ep->endpoint.maxpacket);
		if (priv_dev->gadget.speed == USB_SPEED_SUPER)
			trb->length = cpu_to_le32(TRB_TDL_SS_SIZE(td_size));
		else
			control |= TRB_TDL_HS_SIZE(td_size);
	}

	do {
		u32 length;

		/* fill TRB */
		control |= TRB_TYPE(TRB_NORMAL);
		if (sg_supported) {
			trb->buffer = cpu_to_le32(TRB_BUFFER(sg_dma_address(s)));
			length = sg_dma_len(s);
		} else {
			trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma));
			length = request->length;
		}

		if (priv_ep->flags & EP_TDLCHK_EN)
			total_tdl += DIV_ROUND_UP(length,
						  priv_ep->endpoint.maxpacket);

		trb->length |= cpu_to_le32(TRB_BURST_LEN(priv_ep->trb_burst_size) |
					   TRB_LEN(length));
		pcs = priv_ep->pcs ? TRB_CYCLE : 0;

		/*
		 * The first trb should be prepared as the last one to avoid
		 * processing the transfer too early.
		 */
		if (sg_iter != 0)
			control |= pcs;

		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) {
			control |= TRB_IOC | TRB_ISP;
		} else {
			/* for the last element in the TD or in the SG list */
			if (sg_iter == (num_trb - 1) && sg_iter != 0)
				control |= pcs | TRB_IOC | TRB_ISP;
		}

		if (sg_iter)
			trb->control = cpu_to_le32(control);
		else
			priv_req->trb->control = cpu_to_le32(control);

		if (sg_supported) {
			trb->control |= cpu_to_le32(TRB_ISP);
			/* Don't set the chain bit for the last TRB */
			if (sg_iter < num_trb - 1)
				trb->control |= cpu_to_le32(TRB_CHAIN);

			s = sg_next(s);
		}

		control = 0;
		++sg_iter;
		priv_req->end_trb = priv_ep->enqueue;
		cdns3_ep_inc_enq(priv_ep);
		trb = priv_ep->trb_pool + priv_ep->enqueue;
		trb->length = 0;
	} while (sg_iter < num_trb);

	trb = priv_req->trb;

	priv_req->flags |= REQUEST_PENDING;
	priv_req->num_of_trb = num_trb;

	if (sg_iter == 1)
		trb->control |= cpu_to_le32(TRB_IOC | TRB_ISP);

	if (priv_dev->dev_ver < DEV_VER_V2 &&
	    (priv_ep->flags & EP_TDLCHK_EN)) {
		u16 tdl = total_tdl;
		u16 old_tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));

		if (tdl > EP_CMD_TDL_MAX) {
			tdl = EP_CMD_TDL_MAX;
			priv_ep->pending_tdl = total_tdl - EP_CMD_TDL_MAX;
		}

		if (old_tdl < tdl) {
			tdl -= old_tdl;
			writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL,
			       &priv_dev->regs->ep_cmd);
		}
	}

	/*
	 * Memory barrier - the cycle bit must be set before the other
	 * fields in the trb.
	 */
	wmb();

	/* give the TD to the consumer */
	if (togle_pcs)
		trb->control = trb->control ^ cpu_to_le32(1);

	if (priv_dev->dev_ver <= DEV_VER_V2)
		cdns3_wa1_tray_restore_cycle_bit(priv_dev, priv_ep);

	if (num_trb > 1) {
		int i = 0;

		while (i < num_trb) {
			trace_cdns3_prepare_trb(priv_ep, trb + i);
			if (trb + i == link_trb) {
				trb = priv_ep->trb_pool;
				num_trb = num_trb - i;
				i = 0;
			} else {
				i++;
			}
		}
	} else {
		trace_cdns3_prepare_trb(priv_ep, priv_req->trb);
	}

	/*
	 * Memory barrier - Cycle Bit must be set before trb->length and
	 * trb->buffer fields.
	 */
	wmb();

	/*
	 * For DMULT mode we can set the address of the transfer ring only
	 * once after enabling the endpoint.
	 */
	if (priv_ep->flags & EP_UPDATE_EP_TRBADDR) {
		/*
		 * As long as SW is not ready to handle the OUT transfer the
		 * ISO OUT endpoint should be disabled (EP_CFG.ENABLE = 0).
		 * EP_CFG_ENABLE must be set before updating ep_traddr.
		 */
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir &&
		    !(priv_ep->flags & EP_QUIRK_ISO_OUT_EN)) {
			priv_ep->flags |= EP_QUIRK_ISO_OUT_EN;
			cdns3_set_register_bit(&priv_dev->regs->ep_cfg,
					       EP_CFG_ENABLE);
		}

		writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma +
					priv_req->start_trb * TRB_SIZE),
		       &priv_dev->regs->ep_traddr);

		priv_ep->flags &= ~EP_UPDATE_EP_TRBADDR;
	}

	if (!priv_ep->wa1_set && !(priv_ep->flags & EP_STALLED)) {
		trace_cdns3_ring(priv_ep);
		/* clear TRBERR and EP_STS_DESCMIS before setting DRDY */
		writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);
		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
		trace_cdns3_doorbell_epx(priv_ep->name,
					 readl(&priv_dev->regs->ep_traddr));
	}

	/* WORKAROUND for transition to L0 */
	__cdns3_gadget_wakeup(priv_dev);

	return 0;
}

void cdns3_set_hw_configuration(struct cdns3_device *priv_dev)
{
	struct cdns3_endpoint *priv_ep;
	struct usb_ep *ep;

	if (priv_dev->hw_configured_flag)
		return;

	writel(USB_CONF_CFGSET, &priv_dev->regs->usb_conf);

	cdns3_set_register_bit(&priv_dev->regs->usb_conf,
			       USB_CONF_U1EN | USB_CONF_U2EN);

	priv_dev->hw_configured_flag = 1;

	list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
		if (ep->enabled) {
			priv_ep = ep_to_cdns3_ep(ep);
			cdns3_start_all_request(priv_dev, priv_ep);
		}
	}

	cdns3_allow_enable_l1(priv_dev, 1);
}

/**
 * cdns3_trb_handled - check whether trb has been handled by DMA
 *
 * @priv_ep: extended endpoint object.
 * @priv_req: request object for checking
 *
 * Endpoint must be selected before invoking this function.
 *
 * Returns false if request has not been handled by DMA, else returns true.
 *
 * SR - start ring
 * ER - end ring
 * DQ = priv_ep->dequeue - dequeue position
 * EQ = priv_ep->enqueue - enqueue position
 * ST = priv_req->start_trb - index of first TRB in transfer ring
 * ET = priv_req->end_trb - index of last TRB in transfer ring
 * CI = current_index - index of TRB processed by DMA.
 *
 * As a first step, we check if the TRB is between ST and ET.
 * Then we check if the cycle bit for index priv_ep->dequeue
 * is correct.
 *
 * some rules:
 * 1. priv_ep->dequeue never equals current_index.
 * 2. priv_ep->enqueue never exceeds priv_ep->dequeue
 * 3. exception: priv_ep->enqueue == priv_ep->dequeue
 *    and priv_ep->free_trbs is zero.
 *    This case indicates that the TR is full.
 *
 * In the two cases below, the request has been handled.
 * Case 1 - priv_ep->dequeue < current_index
 *      SR ... EQ ... DQ ... CI ... ER
 *      SR ... DQ ... CI ... EQ ... ER
 *
 * Case 2 - priv_ep->dequeue > current_index
 * This situation takes place when CI goes through the LINK TRB at the end
 * of the transfer ring.
 *      SR ... CI ... EQ ... DQ ... ER
 */
static bool cdns3_trb_handled(struct cdns3_endpoint *priv_ep,
			      struct cdns3_request *priv_req)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_trb *trb;
	int current_index = 0;
	int handled = 0;
	int doorbell;

	current_index = cdns3_get_dma_pos(priv_dev, priv_ep);
	doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);

	/* current trb doesn't belong to this request */
	if (priv_req->start_trb < priv_req->end_trb) {
		if (priv_ep->dequeue > priv_req->end_trb)
			goto finish;

		if (priv_ep->dequeue < priv_req->start_trb)
			goto finish;
	}

	if ((priv_req->start_trb > priv_req->end_trb) &&
	    (priv_ep->dequeue > priv_req->end_trb) &&
	    (priv_ep->dequeue < priv_req->start_trb))
		goto finish;

	if ((priv_req->start_trb == priv_req->end_trb) &&
	    (priv_ep->dequeue != priv_req->end_trb))
		goto finish;

	trb = &priv_ep->trb_pool[priv_ep->dequeue];

	if ((le32_to_cpu(trb->control) & TRB_CYCLE) != priv_ep->ccs)
		goto finish;

	if (doorbell == 1 && current_index == priv_ep->dequeue)
		goto finish;

	/* The corner case for TRBS_PER_SEGMENT equal to 2. */
	if (TRBS_PER_SEGMENT == 2 && priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
		handled = 1;
		goto finish;
	}

	if (priv_ep->enqueue == priv_ep->dequeue &&
	    priv_ep->free_trbs == 0) {
		handled = 1;
	} else if (priv_ep->dequeue < current_index) {
		if ((current_index == (priv_ep->num_trbs - 1)) &&
		    !priv_ep->dequeue)
			goto finish;

		handled = 1;
	} else if (priv_ep->dequeue > current_index) {
		handled = 1;
	}

finish:
	trace_cdns3_request_handled(priv_req, current_index, handled);

	return handled;
}

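/*
 * Illustration only: with num_trbs == 8, start_trb == 2 and end_trb == 5,
 * a dequeue of 1 or 6 takes the early "goto finish" path (the current TRB
 * does not belong to the request), while a dequeue of 3 falls through to
 * the cycle-bit and doorbell checks.
 */
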
static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
				     struct cdns3_endpoint *priv_ep)
{
	struct cdns3_request *priv_req;
	struct usb_request *request;
	struct cdns3_trb *trb;
	bool request_handled = false;
	bool transfer_end = false;

	while (!list_empty(&priv_ep->pending_req_list)) {
		request = cdns3_next_request(&priv_ep->pending_req_list);
		priv_req = to_cdns3_request(request);

		trb = priv_ep->trb_pool + priv_ep->dequeue;

		/* The request was dequeued and the TRB was changed to TRB_LINK. */
		if (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) {
			trace_cdns3_complete_trb(priv_ep, trb);
			cdns3_move_deq_to_next_trb(priv_req);
		}

		if (!request->stream_id) {
			/* Re-select endpoint. It could be changed by another
			 * CPU while handling usb_gadget_giveback_request.
			 */
			cdns3_select_ep(priv_dev, priv_ep->endpoint.address);

			while (cdns3_trb_handled(priv_ep, priv_req)) {
				priv_req->finished_trb++;
				if (priv_req->finished_trb >= priv_req->num_of_trb)
					request_handled = true;

				trb = priv_ep->trb_pool + priv_ep->dequeue;
				trace_cdns3_complete_trb(priv_ep, trb);

				if (!transfer_end)
					request->actual +=
						TRB_LEN(le32_to_cpu(trb->length));

				if (priv_req->num_of_trb > 1 &&
				    le32_to_cpu(trb->control) & TRB_SMM)
					transfer_end = true;

				cdns3_ep_inc_deq(priv_ep);
			}

			if (request_handled) {
				cdns3_gadget_giveback(priv_ep, priv_req, 0);
				request_handled = false;
				transfer_end = false;
			} else {
				goto prepare_next_td;
			}

			if (priv_ep->type != USB_ENDPOINT_XFER_ISOC &&
			    TRBS_PER_SEGMENT == 2)
				break;
		} else {
			/* Re-select endpoint. It could be changed by another
			 * CPU while handling usb_gadget_giveback_request.
			 */
			cdns3_select_ep(priv_dev, priv_ep->endpoint.address);

			trb = priv_ep->trb_pool;
			trace_cdns3_complete_trb(priv_ep, trb);

			if (trb != priv_req->trb)
				dev_warn(priv_dev->dev,
					 "request_trb=0x%p, queue_trb=0x%p\n",
					 priv_req->trb, trb);

			request->actual += TRB_LEN(le32_to_cpu(trb->length));

			if (!request->num_sgs ||
			    (request->num_sgs == (priv_ep->stream_sg_idx + 1))) {
				priv_ep->stream_sg_idx = 0;
				cdns3_gadget_giveback(priv_ep, priv_req, 0);
			} else {
				priv_ep->stream_sg_idx++;
				cdns3_ep_run_stream_transfer(priv_ep, request);
			}
			break;
		}
	}
	priv_ep->flags &= ~EP_PENDING_REQUEST;

prepare_next_td:
	if (!(priv_ep->flags & EP_STALLED) &&
	    !(priv_ep->flags & EP_STALL_PENDING))
		cdns3_start_all_request(priv_dev, priv_ep);
}

1585void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm)
1586{
1587 struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
1588
1589 cdns3_wa1_restore_cycle_bit(priv_ep);
1590
1591 if (rearm) {
1592 trace_cdns3_ring(priv_ep);
1593
1594 /* Cycle Bit must be updated before arming DMA. */
1595 wmb();
1596 writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
1597
1598 __cdns3_gadget_wakeup(priv_dev);
1599
1600 trace_cdns3_doorbell_epx(priv_ep->name,
1601 readl(&priv_dev->regs->ep_traddr));
1602 }
1603}
1604
Jayshri Pawar54c4c692019-12-13 06:25:42 +01001605static void cdns3_reprogram_tdl(struct cdns3_endpoint *priv_ep)
1606{
1607 u16 tdl = priv_ep->pending_tdl;
1608 struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
1609
1610 if (tdl > EP_CMD_TDL_MAX) {
1611 tdl = EP_CMD_TDL_MAX;
1612 priv_ep->pending_tdl -= EP_CMD_TDL_MAX;
1613 } else {
1614 priv_ep->pending_tdl = 0;
1615 }
1616
1617 writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL, &priv_dev->regs->ep_cmd);
1618}
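
/*
 * Illustrative example of the chunking above: if pending_tdl were
 * EP_CMD_TDL_MAX + 3, this call would program EP_CMD_TDL_MAX now and
 * leave 3 in pending_tdl; the remainder is programmed by the same path
 * from the next IOT interrupt.
 */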
1619
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01001620/**
1621 * cdns3_check_ep_interrupt_proceed - Processes interrupt related to endpoint
1622 * @priv_ep: endpoint object
1623 *
1624 * Returns 0
1625 */
1626static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
1627{
1628 struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
1629 u32 ep_sts_reg;
Jayshri Pawar54c4c692019-12-13 06:25:42 +01001630 struct usb_request *deferred_request;
1631 struct usb_request *pending_request;
1632 u32 tdl = 0;
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01001633
1634 cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
1635
1636 trace_cdns3_epx_irq(priv_dev, priv_ep);
1637
1638 ep_sts_reg = readl(&priv_dev->regs->ep_sts);
1639 writel(ep_sts_reg, &priv_dev->regs->ep_sts);
1640
Jayshri Pawar54c4c692019-12-13 06:25:42 +01001641 if ((ep_sts_reg & EP_STS_PRIME) && priv_ep->use_streams) {
1642 bool dbusy = !!(ep_sts_reg & EP_STS_DBUSY);
1643
1644 tdl = cdns3_get_tdl(priv_dev);
1645
1646 /*
1647 * Continue the previous transfer:
 1648		 * There is a race between ERDY and PRIME. The device sends
 1649		 * ERDY and at almost the same time the host sends PRIME.
 1650		 * This causes the host to ignore the ERDY packet, so the
 1651		 * driver has to send it again.
1652 */
Peter Chen8dafb3c2020-08-21 11:14:37 +08001653 if (tdl && (dbusy || !EP_STS_BUFFEMPTY(ep_sts_reg) ||
Jayshri Pawar54c4c692019-12-13 06:25:42 +01001654 EP_STS_HOSTPP(ep_sts_reg))) {
1655 writel(EP_CMD_ERDY |
1656 EP_CMD_ERDY_SID(priv_ep->last_stream_id),
1657 &priv_dev->regs->ep_cmd);
1658 ep_sts_reg &= ~(EP_STS_MD_EXIT | EP_STS_IOC);
1659 } else {
1660 priv_ep->prime_flag = true;
1661
1662 pending_request = cdns3_next_request(&priv_ep->pending_req_list);
1663 deferred_request = cdns3_next_request(&priv_ep->deferred_req_list);
1664
1665 if (deferred_request && !pending_request) {
1666 cdns3_start_all_request(priv_dev, priv_ep);
1667 }
1668 }
1669 }
1670
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01001671 if (ep_sts_reg & EP_STS_TRBERR) {
1672 if (priv_ep->flags & EP_STALL_PENDING &&
1673 !(ep_sts_reg & EP_STS_DESCMIS &&
1674 priv_dev->dev_ver < DEV_VER_V2)) {
1675 cdns3_ep_stall_flush(priv_ep);
1676 }
1677
1678 /*
 1679		 * For isochronous transfers the driver completes the request
 1680		 * on IOC or on TRBERR. IOC appears only when the device
 1681		 * receives an OUT data packet. If the host disables the stream
 1682		 * or loses some packets, then the only way to finish all
 1683		 * queued transfers is to do it on the TRBERR event.
1684 */
1685 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC &&
1686 !priv_ep->wa1_set) {
1687 if (!priv_ep->dir) {
1688 u32 ep_cfg = readl(&priv_dev->regs->ep_cfg);
1689
1690 ep_cfg &= ~EP_CFG_ENABLE;
1691 writel(ep_cfg, &priv_dev->regs->ep_cfg);
1692 priv_ep->flags &= ~EP_QUIRK_ISO_OUT_EN;
1693 }
1694 cdns3_transfer_completed(priv_dev, priv_ep);
1695 } else if (!(priv_ep->flags & EP_STALLED) &&
1696 !(priv_ep->flags & EP_STALL_PENDING)) {
1697 if (priv_ep->flags & EP_DEFERRED_DRDY) {
1698 priv_ep->flags &= ~EP_DEFERRED_DRDY;
1699 cdns3_start_all_request(priv_dev, priv_ep);
1700 } else {
1701 cdns3_rearm_transfer(priv_ep,
1702 priv_ep->wa1_set);
1703 }
1704 }
1705 }
1706
Jayshri Pawar54c4c692019-12-13 06:25:42 +01001707 if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP) ||
1708 (ep_sts_reg & EP_STS_IOT)) {
Pawel Laszczak6bbf87a2019-08-26 12:19:31 +01001709 if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
1710 if (ep_sts_reg & EP_STS_ISP)
1711 priv_ep->flags |= EP_QUIRK_END_TRANSFER;
1712 else
1713 priv_ep->flags &= ~EP_QUIRK_END_TRANSFER;
1714 }
1715
Jayshri Pawar54c4c692019-12-13 06:25:42 +01001716 if (!priv_ep->use_streams) {
1717 if ((ep_sts_reg & EP_STS_IOC) ||
1718 (ep_sts_reg & EP_STS_ISP)) {
1719 cdns3_transfer_completed(priv_dev, priv_ep);
 1720			} else if ((priv_ep->flags & EP_TDLCHK_EN) &&
1721 priv_ep->pending_tdl) {
1722 /* handle IOT with pending tdl */
1723 cdns3_reprogram_tdl(priv_ep);
1724 }
1725 } else if (priv_ep->dir == USB_DIR_OUT) {
1726 priv_ep->ep_sts_pending |= ep_sts_reg;
1727 } else if (ep_sts_reg & EP_STS_IOT) {
1728 cdns3_transfer_completed(priv_dev, priv_ep);
1729 }
1730 }
1731
1732 /*
 1733	 * The MD_EXIT interrupt is set when a stream capable endpoint exits
 1734	 * the MOVE DATA state of the Bulk IN/OUT stream protocol state machine.
1735 */
1736 if (priv_ep->dir == USB_DIR_OUT && (ep_sts_reg & EP_STS_MD_EXIT) &&
1737 (priv_ep->ep_sts_pending & EP_STS_IOT) && priv_ep->use_streams) {
1738 priv_ep->ep_sts_pending = 0;
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01001739 cdns3_transfer_completed(priv_dev, priv_ep);
Pawel Laszczak6bbf87a2019-08-26 12:19:31 +01001740 }
1741
1742 /*
 1743	 * WA2: this condition should be met only when
1744 * priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET or
1745 * priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN.
Jayshri Pawar54c4c692019-12-13 06:25:42 +01001746 * In other cases this interrupt will be disabled.
Pawel Laszczak6bbf87a2019-08-26 12:19:31 +01001747 */
1748 if (ep_sts_reg & EP_STS_DESCMIS && priv_dev->dev_ver < DEV_VER_V2 &&
1749 !(priv_ep->flags & EP_STALLED))
1750 cdns3_wa2_descmissing_packet(priv_ep);
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01001751
1752 return 0;
1753}
1754
1755static void cdns3_disconnect_gadget(struct cdns3_device *priv_dev)
1756{
Peter Chene11d2bf2020-10-29 17:55:18 +08001757 if (priv_dev->gadget_driver && priv_dev->gadget_driver->disconnect)
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01001758 priv_dev->gadget_driver->disconnect(&priv_dev->gadget);
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01001759}
1760
1761/**
1762 * cdns3_check_usb_interrupt_proceed - Processes interrupt related to device
1763 * @priv_dev: extended gadget object
1764 * @usb_ists: bitmap representation of device's reported interrupts
1765 * (usb_ists register value)
1766 */
1767static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev,
1768 u32 usb_ists)
Peter Chene11d2bf2020-10-29 17:55:18 +08001769__must_hold(&priv_dev->lock)
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01001770{
1771 int speed = 0;
1772
1773 trace_cdns3_usb_irq(priv_dev, usb_ists);
1774 if (usb_ists & USB_ISTS_L1ENTI) {
1775 /*
 1776		 * WORKAROUND: the CDNS3 controller has an issue with hardware
 1777		 * resuming from L1. To fix it, if any DMA transfer is pending,
 1778		 * the driver must start driving the resume signal immediately.
1779 */
1780 if (readl(&priv_dev->regs->drbl))
1781 __cdns3_gadget_wakeup(priv_dev);
1782 }
1783
1784 /* Connection detected */
1785 if (usb_ists & (USB_ISTS_CON2I | USB_ISTS_CONI)) {
1786 speed = cdns3_get_speed(priv_dev);
1787 priv_dev->gadget.speed = speed;
1788 usb_gadget_set_state(&priv_dev->gadget, USB_STATE_POWERED);
1789 cdns3_ep0_config(priv_dev);
1790 }
1791
1792 /* Disconnection detected */
1793 if (usb_ists & (USB_ISTS_DIS2I | USB_ISTS_DISI)) {
Peter Chene11d2bf2020-10-29 17:55:18 +08001794 spin_unlock(&priv_dev->lock);
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01001795 cdns3_disconnect_gadget(priv_dev);
Peter Chene11d2bf2020-10-29 17:55:18 +08001796 spin_lock(&priv_dev->lock);
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01001797 priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
1798 usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED);
1799 cdns3_hw_reset_eps_config(priv_dev);
1800 }
1801
1802 if (usb_ists & (USB_ISTS_L2ENTI | USB_ISTS_U3ENTI)) {
1803 if (priv_dev->gadget_driver &&
1804 priv_dev->gadget_driver->suspend) {
1805 spin_unlock(&priv_dev->lock);
1806 priv_dev->gadget_driver->suspend(&priv_dev->gadget);
1807 spin_lock(&priv_dev->lock);
1808 }
1809 }
1810
1811 if (usb_ists & (USB_ISTS_L2EXTI | USB_ISTS_U3EXTI)) {
1812 if (priv_dev->gadget_driver &&
1813 priv_dev->gadget_driver->resume) {
1814 spin_unlock(&priv_dev->lock);
1815 priv_dev->gadget_driver->resume(&priv_dev->gadget);
1816 spin_lock(&priv_dev->lock);
1817 }
1818 }
1819
 1820	/* reset */
1821 if (usb_ists & (USB_ISTS_UWRESI | USB_ISTS_UHRESI | USB_ISTS_U2RESI)) {
1822 if (priv_dev->gadget_driver) {
1823 spin_unlock(&priv_dev->lock);
1824 usb_gadget_udc_reset(&priv_dev->gadget,
1825 priv_dev->gadget_driver);
1826 spin_lock(&priv_dev->lock);
1827
 1828			/* read again to check the actual speed */
1829 speed = cdns3_get_speed(priv_dev);
1830 priv_dev->gadget.speed = speed;
1831 cdns3_hw_reset_eps_config(priv_dev);
1832 cdns3_ep0_config(priv_dev);
1833 }
1834 }
1835}
1836
1837/**
Lee Jones56480a02021-05-26 14:00:20 +01001838 * cdns3_device_irq_handler - interrupt handler for device part of controller
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01001839 *
1840 * @irq: irq number for cdns3 core device
1841 * @data: structure of cdns3
1842 *
1843 * Returns IRQ_HANDLED or IRQ_NONE
1844 */
1845static irqreturn_t cdns3_device_irq_handler(int irq, void *data)
1846{
Peter Chenaf58e1f2019-12-27 17:10:04 +08001847 struct cdns3_device *priv_dev = data;
Pawel Laszczak0b490042020-12-07 11:32:21 +01001848 struct cdns *cdns = dev_get_drvdata(priv_dev->dev);
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01001849 irqreturn_t ret = IRQ_NONE;
1850 u32 reg;
1851
Peter Chenb1234e32020-09-02 17:57:32 +08001852 if (cdns->in_lpm)
1853 return ret;
1854
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01001855 /* check USB device interrupt */
1856 reg = readl(&priv_dev->regs->usb_ists);
1857 if (reg) {
1858 /* After masking interrupts the new interrupts won't be
 1859		 * reported in usb_ists/ep_ists. In order not to lose any
 1860		 * of them, the driver disables only the detected interrupts.
 1861		 * They will be re-enabled ASAP after the interrupt source is
 1862		 * cleared. This unusual behavior applies only to the
 1863		 * usb_ists register.
1864 */
1865 reg = ~reg & readl(&priv_dev->regs->usb_ien);
1866 /* mask deferred interrupt. */
1867 writel(reg, &priv_dev->regs->usb_ien);
1868 ret = IRQ_WAKE_THREAD;
1869 }
1870
1871 /* check endpoint interrupt */
1872 reg = readl(&priv_dev->regs->ep_ists);
1873 if (reg) {
1874 writel(0, &priv_dev->regs->ep_ien);
1875 ret = IRQ_WAKE_THREAD;
1876 }
1877
1878 return ret;
1879}
1880
1881/**
Lee Jones56480a02021-05-26 14:00:20 +01001882 * cdns3_device_thread_irq_handler - interrupt handler for device part
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01001883 * of controller
1884 *
1885 * @irq: irq number for cdns3 core device
1886 * @data: structure of cdns3
1887 *
1888 * Returns IRQ_HANDLED or IRQ_NONE
1889 */
1890static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data)
1891{
Peter Chenaf58e1f2019-12-27 17:10:04 +08001892 struct cdns3_device *priv_dev = data;
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01001893 irqreturn_t ret = IRQ_NONE;
1894 unsigned long flags;
Peter Chen06825ca2020-06-23 11:10:01 +08001895 unsigned int bit;
Peter Chen8685c462020-06-23 11:10:00 +08001896 unsigned long reg;
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01001897
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01001898 spin_lock_irqsave(&priv_dev->lock, flags);
1899
1900 reg = readl(&priv_dev->regs->usb_ists);
1901 if (reg) {
1902 writel(reg, &priv_dev->regs->usb_ists);
1903 writel(USB_IEN_INIT, &priv_dev->regs->usb_ien);
1904 cdns3_check_usb_interrupt_proceed(priv_dev, reg);
1905 ret = IRQ_HANDLED;
1906 }
1907
1908 reg = readl(&priv_dev->regs->ep_ists);
1909
1910 /* handle default endpoint OUT */
1911 if (reg & EP_ISTS_EP_OUT0) {
1912 cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_OUT);
1913 ret = IRQ_HANDLED;
1914 }
1915
1916 /* handle default endpoint IN */
1917 if (reg & EP_ISTS_EP_IN0) {
1918 cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_IN);
1919 ret = IRQ_HANDLED;
1920 }
1921
 1922	/* check if the interrupt is from a non-default endpoint; if not, exit */
1923 reg &= ~(EP_ISTS_EP_OUT0 | EP_ISTS_EP_IN0);
1924 if (!reg)
1925 goto irqend;
1926
Peter Chen8685c462020-06-23 11:10:00 +08001927 for_each_set_bit(bit, &reg,
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01001928 sizeof(u32) * BITS_PER_BYTE) {
1929 cdns3_check_ep_interrupt_proceed(priv_dev->eps[bit]);
1930 ret = IRQ_HANDLED;
1931 }
1932
Jayshri Pawar54c4c692019-12-13 06:25:42 +01001933 if (priv_dev->dev_ver < DEV_VER_V2 && priv_dev->using_streams)
1934 cdns3_wa2_check_outq_status(priv_dev);
1935
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01001936irqend:
1937 writel(~0, &priv_dev->regs->ep_ien);
1938 spin_unlock_irqrestore(&priv_dev->lock, flags);
1939
1940 return ret;
1941}
1942
1943/**
1944 * cdns3_ep_onchip_buffer_reserve - Try to reserve onchip buf for EP
1945 *
 1946 * The real reservation will occur during a write to the EP_CFG register;
 1947 * this function is used to check if the 'size' reservation is allowed.
1948 *
1949 * @priv_dev: extended gadget object
1950 * @size: the size (KB) for EP would like to allocate
1951 * @is_in: endpoint direction
1952 *
 1953 * Return 0 if the required size can be met, or a negative value on failure
1954 */
1955static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev,
1956 int size, int is_in)
1957{
1958 int remained;
1959
 1960	/* 2KB are reserved for EP0 */
1961 remained = priv_dev->onchip_buffers - priv_dev->onchip_used_size - 2;
1962
1963 if (is_in) {
1964 if (remained < size)
1965 return -EPERM;
1966
1967 priv_dev->onchip_used_size += size;
1968 } else {
1969 int required;
1970
 1971		/*
 1972		 * All OUT EPs share the same chunk of on-chip memory, so the
 1973		 * driver checks if it has already assigned enough buffers.
1974 */
1975 if (priv_dev->out_mem_is_allocated >= size)
1976 return 0;
1977
1978 required = size - priv_dev->out_mem_is_allocated;
1979
1980 if (required > remained)
1981 return -EPERM;
1982
1983 priv_dev->out_mem_is_allocated += required;
1984 priv_dev->onchip_used_size += required;
1985 }
1986
1987 return 0;
1988}
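
/*
 * Worked example (numbers assumed, units are KB): with onchip_buffers = 16
 * and nothing reserved yet, an IN EP asking for 4 sees remained =
 * 16 - 0 - 2 = 14 and succeeds. A first OUT EP asking for 4 then grows the
 * shared OUT chunk (out_mem_is_allocated = 4); a second OUT EP asking for 4
 * sees out_mem_is_allocated >= size and returns 0 without consuming any
 * more on-chip memory.
 */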
1989
Jason Yane9010322020-04-02 20:38:37 +08001990static void cdns3_configure_dmult(struct cdns3_device *priv_dev,
kbuild test robote2e77a92020-03-27 09:12:01 +08001991 struct cdns3_endpoint *priv_ep)
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01001992{
1993 struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
1994
1995 /* For dev_ver > DEV_VER_V2 DMULT is configured per endpoint */
1996 if (priv_dev->dev_ver <= DEV_VER_V2)
1997 writel(USB_CONF_DMULT, &regs->usb_conf);
1998
1999 if (priv_dev->dev_ver == DEV_VER_V2)
2000 writel(USB_CONF2_EN_TDL_TRB, &regs->usb_conf2);
2001
2002 if (priv_dev->dev_ver >= DEV_VER_V3 && priv_ep) {
2003 u32 mask;
2004
2005 if (priv_ep->dir)
2006 mask = BIT(priv_ep->num + 16);
2007 else
2008 mask = BIT(priv_ep->num);
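		/* e.g. ep3in gives mask = BIT(19), ep3out gives mask = BIT(3) */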
2009
2010 if (priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
2011 cdns3_set_register_bit(&regs->tdl_from_trb, mask);
2012 cdns3_set_register_bit(&regs->tdl_beh, mask);
2013 cdns3_set_register_bit(&regs->tdl_beh2, mask);
2014 cdns3_set_register_bit(&regs->dma_adv_td, mask);
2015 }
2016
2017 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir)
2018 cdns3_set_register_bit(&regs->tdl_from_trb, mask);
2019
2020 cdns3_set_register_bit(&regs->dtrans, mask);
2021 }
2022}
2023
2024/**
Lee Jones56480a02021-05-26 14:00:20 +01002025 * cdns3_ep_config - Configure hardware endpoint
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002026 * @priv_ep: extended endpoint object
Pawel Laszczak52d39672020-10-22 08:55:05 +08002027 * @enable: set EP_CFG_ENABLE bit in ep_cfg register.
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002028 */
Pawel Laszczak52d39672020-10-22 08:55:05 +08002029int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002030{
2031 bool is_iso_ep = (priv_ep->type == USB_ENDPOINT_XFER_ISOC);
2032 struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
2033 u32 bEndpointAddress = priv_ep->num | priv_ep->dir;
2034 u32 max_packet_size = 0;
2035 u8 maxburst = 0;
2036 u32 ep_cfg = 0;
2037 u8 buffering;
2038 u8 mult = 0;
2039 int ret;
2040
2041 buffering = CDNS3_EP_BUF_SIZE - 1;
2042
2043 cdns3_configure_dmult(priv_dev, priv_ep);
2044
2045 switch (priv_ep->type) {
2046 case USB_ENDPOINT_XFER_INT:
2047 ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_INT);
2048
2049 if ((priv_dev->dev_ver == DEV_VER_V2 && !priv_ep->dir) ||
2050 priv_dev->dev_ver > DEV_VER_V2)
2051 ep_cfg |= EP_CFG_TDL_CHK;
2052 break;
2053 case USB_ENDPOINT_XFER_BULK:
2054 ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_BULK);
2055
2056 if ((priv_dev->dev_ver == DEV_VER_V2 && !priv_ep->dir) ||
2057 priv_dev->dev_ver > DEV_VER_V2)
2058 ep_cfg |= EP_CFG_TDL_CHK;
2059 break;
2060 default:
2061 ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_ISOC);
2062 mult = CDNS3_EP_ISO_HS_MULT - 1;
2063 buffering = mult + 1;
2064 }
2065
2066 switch (priv_dev->gadget.speed) {
2067 case USB_SPEED_FULL:
2068 max_packet_size = is_iso_ep ? 1023 : 64;
2069 break;
2070 case USB_SPEED_HIGH:
2071 max_packet_size = is_iso_ep ? 1024 : 512;
2072 break;
2073 case USB_SPEED_SUPER:
 2074		/* This is a limitation that the driver itself assumes. */
2075 mult = 0;
2076 max_packet_size = 1024;
2077 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
2078 maxburst = CDNS3_EP_ISO_SS_BURST - 1;
2079 buffering = (mult + 1) *
2080 (maxburst + 1);
2081
2082 if (priv_ep->interval > 1)
2083 buffering++;
2084 } else {
2085 maxburst = CDNS3_EP_BUF_SIZE - 1;
2086 }
2087 break;
2088 default:
 2089		/* all other speeds are not supported */
Pawel Laszczak52d39672020-10-22 08:55:05 +08002090 return -EINVAL;
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002091 }
2092
2093 if (max_packet_size == 1024)
2094 priv_ep->trb_burst_size = 128;
2095 else if (max_packet_size >= 512)
2096 priv_ep->trb_burst_size = 64;
2097 else
2098 priv_ep->trb_burst_size = 16;
2099
Pawel Laszczak52d39672020-10-22 08:55:05 +08002100 /* onchip buffer is only allocated before configuration */
2101 if (!priv_dev->hw_configured_flag) {
2102 ret = cdns3_ep_onchip_buffer_reserve(priv_dev, buffering + 1,
2103 !!priv_ep->dir);
2104 if (ret) {
2105 dev_err(priv_dev->dev, "onchip mem is full, ep is invalid\n");
2106 return ret;
2107 }
2108 }
2109
2110 if (enable)
2111 ep_cfg |= EP_CFG_ENABLE;
2112
2113 if (priv_ep->use_streams && priv_dev->gadget.speed >= USB_SPEED_SUPER) {
2114 if (priv_dev->dev_ver >= DEV_VER_V3) {
2115 u32 mask = BIT(priv_ep->num + (priv_ep->dir ? 16 : 0));
2116
2117 /*
 2118			 * Stream capable endpoints are handled by using the ep_tdl
 2119			 * register. Other endpoints use the TDL-from-TRB feature.
2120 */
2121 cdns3_clear_register_bit(&priv_dev->regs->tdl_from_trb,
2122 mask);
2123 }
2124
2125 /* Enable Stream Bit TDL chk and SID chk */
2126 ep_cfg |= EP_CFG_STREAM_EN | EP_CFG_TDL_CHK | EP_CFG_SID_CHK;
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002127 }
2128
2129 ep_cfg |= EP_CFG_MAXPKTSIZE(max_packet_size) |
2130 EP_CFG_MULT(mult) |
2131 EP_CFG_BUFFERING(buffering) |
2132 EP_CFG_MAXBURST(maxburst);
2133
2134 cdns3_select_ep(priv_dev, bEndpointAddress);
2135 writel(ep_cfg, &priv_dev->regs->ep_cfg);
Pawel Laszczak52d39672020-10-22 08:55:05 +08002136 priv_ep->flags |= EP_CONFIGURED;
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002137
2138 dev_dbg(priv_dev->dev, "Configure %s: with val %08x\n",
2139 priv_ep->name, ep_cfg);
Pawel Laszczak52d39672020-10-22 08:55:05 +08002140
2141 return 0;
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002142}
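
/*
 * Worked example for the SS ISO buffering math above (the value of
 * CDNS3_EP_ISO_SS_BURST is assumed to be 3 here, for illustration only):
 * mult = 0, maxburst = 3 - 1 = 2, so buffering = (0 + 1) * (2 + 1) = 3
 * buffers, plus one more when the endpoint's interval is greater than 1.
 */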
2143
2144/* Find correct direction for HW endpoint according to description */
2145static int cdns3_ep_dir_is_correct(struct usb_endpoint_descriptor *desc,
2146 struct cdns3_endpoint *priv_ep)
2147{
2148 return (priv_ep->endpoint.caps.dir_in && usb_endpoint_dir_in(desc)) ||
2149 (priv_ep->endpoint.caps.dir_out && usb_endpoint_dir_out(desc));
2150}
2151
2152static struct
2153cdns3_endpoint *cdns3_find_available_ep(struct cdns3_device *priv_dev,
2154 struct usb_endpoint_descriptor *desc)
2155{
2156 struct usb_ep *ep;
2157 struct cdns3_endpoint *priv_ep;
2158
2159 list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
2160 unsigned long num;
2161 int ret;
 2162		/* ep name pattern is like epXin or epXout */
2163 char c[2] = {ep->name[2], '\0'};
2164
2165 ret = kstrtoul(c, 10, &num);
2166 if (ret)
2167 return ERR_PTR(ret);
2168
2169 priv_ep = ep_to_cdns3_ep(ep);
2170 if (cdns3_ep_dir_is_correct(desc, priv_ep)) {
2171 if (!(priv_ep->flags & EP_CLAIMED)) {
2172 priv_ep->num = num;
2173 return priv_ep;
2174 }
2175 }
2176 }
2177
2178 return ERR_PTR(-ENOENT);
2179}
2180
2181/*
 2182 * The Cadence IP has one limitation: all endpoints must be configured
 2183 * (Type & MaxPacketSize) before setting the configuration through the
 2184 * hardware register, which means we can't change an endpoint's
 2185 * configuration after set_configuration.
 2186 *
 2187 * This function sets the EP_CLAIMED flag, which is added when the gadget
 2188 * driver uses usb_ep_autoconfig to configure a specific endpoint;
 2189 * when the udc driver receives the set_configuration request,
 2190 * it goes through all claimed endpoints and configures them
 2191 * accordingly.
 2192 *
 2193 * At usb_ep_ops.enable/disable, we only enable and disable the endpoint
 2194 * through the ep_cfg register, which can be changed after
 2195 * set_configuration, and do some software operations accordingly.
2196 */
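
/*
 * Illustrative sketch (not part of this driver): a gadget function driver
 * typically claims an endpoint in its bind callback, which ends up in
 * cdns3_gadget_match_ep() below and sets EP_CLAIMED:
 *
 *	ep = usb_ep_autoconfig(gadget, &bulk_in_desc);
 *	if (!ep)
 *		return -ENODEV;
 */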
2197static struct
2198usb_ep *cdns3_gadget_match_ep(struct usb_gadget *gadget,
2199 struct usb_endpoint_descriptor *desc,
2200 struct usb_ss_ep_comp_descriptor *comp_desc)
2201{
2202 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2203 struct cdns3_endpoint *priv_ep;
2204 unsigned long flags;
2205
2206 priv_ep = cdns3_find_available_ep(priv_dev, desc);
2207 if (IS_ERR(priv_ep)) {
2208 dev_err(priv_dev->dev, "no available ep\n");
2209 return NULL;
2210 }
2211
2212 dev_dbg(priv_dev->dev, "match endpoint: %s\n", priv_ep->name);
2213
2214 spin_lock_irqsave(&priv_dev->lock, flags);
2215 priv_ep->endpoint.desc = desc;
2216 priv_ep->dir = usb_endpoint_dir_in(desc) ? USB_DIR_IN : USB_DIR_OUT;
2217 priv_ep->type = usb_endpoint_type(desc);
2218 priv_ep->flags |= EP_CLAIMED;
2219 priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
2220
2221 spin_unlock_irqrestore(&priv_dev->lock, flags);
2222 return &priv_ep->endpoint;
2223}
2224
2225/**
Lee Jones56480a02021-05-26 14:00:20 +01002226 * cdns3_gadget_ep_alloc_request - Allocates request
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002227 * @ep: endpoint object associated with request
2228 * @gfp_flags: gfp flags
2229 *
2230 * Returns allocated request address, NULL on allocation error
2231 */
2232struct usb_request *cdns3_gadget_ep_alloc_request(struct usb_ep *ep,
2233 gfp_t gfp_flags)
2234{
2235 struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
2236 struct cdns3_request *priv_req;
2237
2238 priv_req = kzalloc(sizeof(*priv_req), gfp_flags);
2239 if (!priv_req)
2240 return NULL;
2241
2242 priv_req->priv_ep = priv_ep;
2243
2244 trace_cdns3_alloc_request(priv_req);
2245 return &priv_req->request;
2246}
2247
2248/**
Lee Jones56480a02021-05-26 14:00:20 +01002249 * cdns3_gadget_ep_free_request - Free memory occupied by request
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002250 * @ep: endpoint object associated with request
2251 * @request: request to free memory
2252 */
2253void cdns3_gadget_ep_free_request(struct usb_ep *ep,
2254 struct usb_request *request)
2255{
2256 struct cdns3_request *priv_req = to_cdns3_request(request);
2257
2258 if (priv_req->aligned_buf)
2259 priv_req->aligned_buf->in_use = 0;
2260
2261 trace_cdns3_free_request(priv_req);
2262 kfree(priv_req);
2263}
2264
2265/**
Lee Jones56480a02021-05-26 14:00:20 +01002266 * cdns3_gadget_ep_enable - Enable endpoint
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002267 * @ep: endpoint object
2268 * @desc: endpoint descriptor
2269 *
2270 * Returns 0 on success, error code elsewhere
2271 */
2272static int cdns3_gadget_ep_enable(struct usb_ep *ep,
2273 const struct usb_endpoint_descriptor *desc)
2274{
2275 struct cdns3_endpoint *priv_ep;
2276 struct cdns3_device *priv_dev;
Jayshri Pawar54c4c692019-12-13 06:25:42 +01002277 const struct usb_ss_ep_comp_descriptor *comp_desc;
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002278 u32 reg = EP_STS_EN_TRBERREN;
2279 u32 bEndpointAddress;
2280 unsigned long flags;
2281 int enable = 1;
Pawel Laszczak52d39672020-10-22 08:55:05 +08002282 int ret = 0;
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002283 int val;
2284
2285 priv_ep = ep_to_cdns3_ep(ep);
2286 priv_dev = priv_ep->cdns3_dev;
Jayshri Pawar54c4c692019-12-13 06:25:42 +01002287 comp_desc = priv_ep->endpoint.comp_desc;
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002288
2289 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
2290 dev_dbg(priv_dev->dev, "usbss: invalid parameters\n");
2291 return -EINVAL;
2292 }
2293
2294 if (!desc->wMaxPacketSize) {
2295 dev_err(priv_dev->dev, "usbss: missing wMaxPacketSize\n");
2296 return -EINVAL;
2297 }
2298
2299 if (dev_WARN_ONCE(priv_dev->dev, priv_ep->flags & EP_ENABLED,
2300 "%s is already enabled\n", priv_ep->name))
2301 return 0;
2302
2303 spin_lock_irqsave(&priv_dev->lock, flags);
2304
2305 priv_ep->endpoint.desc = desc;
2306 priv_ep->type = usb_endpoint_type(desc);
2307 priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
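	/* e.g. bInterval = 4 gives interval = 2^(4 - 1) = 8 (micro)frames */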
2308
2309 if (priv_ep->interval > ISO_MAX_INTERVAL &&
2310 priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
 2311		dev_err(priv_dev->dev, "Driver is limited to %d periods\n",
2312 ISO_MAX_INTERVAL);
2313
2314 ret = -EINVAL;
2315 goto exit;
2316 }
2317
Jayshri Pawar54c4c692019-12-13 06:25:42 +01002318 bEndpointAddress = priv_ep->num | priv_ep->dir;
2319 cdns3_select_ep(priv_dev, bEndpointAddress);
2320
Pawel Laszczak52d39672020-10-22 08:55:05 +08002321 /*
 2322	 * For some versions of the controller, at some point during ISO OUT
 2323	 * traffic the DMA reads the Transfer Ring for an EP which has never
 2324	 * got a doorbell. This issue was detected only in simulation, but the
 2325	 * driver adds protection against it anyway: it enables the ISO OUT
 2326	 * endpoint before setting DRBL. This special treatment of ISO OUT
 2327	 * endpoints is recommended by the controller specification.
2328 */
2329 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir)
2330 enable = 0;
2331
Jayshri Pawar54c4c692019-12-13 06:25:42 +01002332 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
2333 /*
2334 * Enable stream support (SS mode) related interrupts
2335 * in EP_STS_EN Register
2336 */
2337 if (priv_dev->gadget.speed >= USB_SPEED_SUPER) {
2338 reg |= EP_STS_EN_IOTEN | EP_STS_EN_PRIMEEEN |
2339 EP_STS_EN_SIDERREN | EP_STS_EN_MD_EXITEN |
2340 EP_STS_EN_STREAMREN;
2341 priv_ep->use_streams = true;
Pawel Laszczak52d39672020-10-22 08:55:05 +08002342 ret = cdns3_ep_config(priv_ep, enable);
Jayshri Pawar54c4c692019-12-13 06:25:42 +01002343 priv_dev->using_streams |= true;
2344 }
Pawel Laszczak52d39672020-10-22 08:55:05 +08002345 } else {
2346 ret = cdns3_ep_config(priv_ep, enable);
Jayshri Pawar54c4c692019-12-13 06:25:42 +01002347 }
2348
Pawel Laszczak52d39672020-10-22 08:55:05 +08002349 if (ret)
2350 goto exit;
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002351
Pawel Laszczak52d39672020-10-22 08:55:05 +08002352 ret = cdns3_allocate_trb_pool(priv_ep);
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002353 if (ret)
2354 goto exit;
2355
2356 bEndpointAddress = priv_ep->num | priv_ep->dir;
2357 cdns3_select_ep(priv_dev, bEndpointAddress);
2358
2359 trace_cdns3_gadget_ep_enable(priv_ep);
2360
2361 writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
2362
2363 ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
2364 !(val & (EP_CMD_CSTALL | EP_CMD_EPRST)),
2365 1, 1000);
2366
2367 if (unlikely(ret)) {
2368 cdns3_free_trb_pool(priv_ep);
2369 ret = -EINVAL;
2370 goto exit;
2371 }
2372
2373 /* enable interrupt for selected endpoint */
2374 cdns3_set_register_bit(&priv_dev->regs->ep_ien,
2375 BIT(cdns3_ep_addr_to_index(bEndpointAddress)));
2376
Pawel Laszczak6bbf87a2019-08-26 12:19:31 +01002377 if (priv_dev->dev_ver < DEV_VER_V2)
2378 cdns3_wa2_enable_detection(priv_dev, priv_ep, reg);
2379
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002380 writel(reg, &priv_dev->regs->ep_sts_en);
2381
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002382 ep->desc = desc;
2383 priv_ep->flags &= ~(EP_PENDING_REQUEST | EP_STALLED | EP_STALL_PENDING |
Pawel Laszczak6bbf87a2019-08-26 12:19:31 +01002384 EP_QUIRK_ISO_OUT_EN | EP_QUIRK_EXTRA_BUF_EN);
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002385 priv_ep->flags |= EP_ENABLED | EP_UPDATE_EP_TRBADDR;
2386 priv_ep->wa1_set = 0;
2387 priv_ep->enqueue = 0;
2388 priv_ep->dequeue = 0;
2389 reg = readl(&priv_dev->regs->ep_sts);
2390 priv_ep->pcs = !!EP_STS_CCS(reg);
2391 priv_ep->ccs = !!EP_STS_CCS(reg);
 2392	/* one TRB is reserved for link TRB used in DMULT mode */
2393 priv_ep->free_trbs = priv_ep->num_trbs - 1;
2394exit:
2395 spin_unlock_irqrestore(&priv_dev->lock, flags);
2396
2397 return ret;
2398}
2399
2400/**
Lee Jones56480a02021-05-26 14:00:20 +01002401 * cdns3_gadget_ep_disable - Disable endpoint
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002402 * @ep: endpoint object
2403 *
2404 * Returns 0 on success, error code elsewhere
2405 */
2406static int cdns3_gadget_ep_disable(struct usb_ep *ep)
2407{
2408 struct cdns3_endpoint *priv_ep;
Pawel Laszczak6bbf87a2019-08-26 12:19:31 +01002409 struct cdns3_request *priv_req;
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002410 struct cdns3_device *priv_dev;
2411 struct usb_request *request;
2412 unsigned long flags;
2413 int ret = 0;
2414 u32 ep_cfg;
2415 int val;
2416
2417 if (!ep) {
2418 pr_err("usbss: invalid parameters\n");
2419 return -EINVAL;
2420 }
2421
2422 priv_ep = ep_to_cdns3_ep(ep);
2423 priv_dev = priv_ep->cdns3_dev;
2424
2425 if (dev_WARN_ONCE(priv_dev->dev, !(priv_ep->flags & EP_ENABLED),
2426 "%s is already disabled\n", priv_ep->name))
2427 return 0;
2428
2429 spin_lock_irqsave(&priv_dev->lock, flags);
2430
2431 trace_cdns3_gadget_ep_disable(priv_ep);
2432
2433 cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);
2434
2435 ep_cfg = readl(&priv_dev->regs->ep_cfg);
2436 ep_cfg &= ~EP_CFG_ENABLE;
2437 writel(ep_cfg, &priv_dev->regs->ep_cfg);
2438
 2439	/*
 2440	 * The driver needs some time before resetting the endpoint.
 2441	 * It has to wait for the DBUSY bit to clear or for the timeout to
 2442	 * expire. 10us is enough time for the controller to stop the transfer.
2443 */
2444 readl_poll_timeout_atomic(&priv_dev->regs->ep_sts, val,
2445 !(val & EP_STS_DBUSY), 1, 10);
2446 writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
2447
 2448	ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
2449 !(val & (EP_CMD_CSTALL | EP_CMD_EPRST)),
2450 1, 1000);
2451 if (unlikely(ret))
2452 dev_err(priv_dev->dev, "Timeout: %s resetting failed.\n",
2453 priv_ep->name);
2454
2455 while (!list_empty(&priv_ep->pending_req_list)) {
2456 request = cdns3_next_request(&priv_ep->pending_req_list);
2457
2458 cdns3_gadget_giveback(priv_ep, to_cdns3_request(request),
2459 -ESHUTDOWN);
2460 }
2461
Pawel Laszczak6bbf87a2019-08-26 12:19:31 +01002462 while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
2463 priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
2464
2465 kfree(priv_req->request.buf);
2466 cdns3_gadget_ep_free_request(&priv_ep->endpoint,
2467 &priv_req->request);
2468 list_del_init(&priv_req->list);
2469 --priv_ep->wa2_counter;
2470 }
2471
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002472 while (!list_empty(&priv_ep->deferred_req_list)) {
2473 request = cdns3_next_request(&priv_ep->deferred_req_list);
2474
2475 cdns3_gadget_giveback(priv_ep, to_cdns3_request(request),
2476 -ESHUTDOWN);
2477 }
2478
Pawel Laszczak6bbf87a2019-08-26 12:19:31 +01002479 priv_ep->descmis_req = NULL;
2480
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002481 ep->desc = NULL;
2482 priv_ep->flags &= ~EP_ENABLED;
Jayshri Pawar54c4c692019-12-13 06:25:42 +01002483 priv_ep->use_streams = false;
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002484
2485 spin_unlock_irqrestore(&priv_dev->lock, flags);
2486
2487 return ret;
2488}
2489
2490/**
Lee Jones00dfda22021-05-26 14:00:33 +01002491 * __cdns3_gadget_ep_queue - Transfer data on endpoint
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002492 * @ep: endpoint object
2493 * @request: request object
2494 * @gfp_flags: gfp flags
2495 *
2496 * Returns 0 on success, error code elsewhere
2497 */
2498static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
2499 struct usb_request *request,
2500 gfp_t gfp_flags)
2501{
2502 struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
2503 struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
2504 struct cdns3_request *priv_req;
2505 int ret = 0;
2506
2507 request->actual = 0;
2508 request->status = -EINPROGRESS;
2509 priv_req = to_cdns3_request(request);
2510 trace_cdns3_ep_queue(priv_req);
2511
Pawel Laszczak6bbf87a2019-08-26 12:19:31 +01002512 if (priv_dev->dev_ver < DEV_VER_V2) {
2513 ret = cdns3_wa2_gadget_ep_queue(priv_dev, priv_ep,
2514 priv_req);
2515
2516 if (ret == EINPROGRESS)
2517 return 0;
2518 }
2519
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002520 ret = cdns3_prepare_aligned_request_buf(priv_req);
2521 if (ret < 0)
2522 return ret;
2523
2524 ret = usb_gadget_map_request_by_dev(priv_dev->sysdev, request,
2525 usb_endpoint_dir_in(ep->desc));
2526 if (ret)
2527 return ret;
2528
2529 list_add_tail(&request->list, &priv_ep->deferred_req_list);
2530
2531 /*
Jayshri Pawar54c4c692019-12-13 06:25:42 +01002532	 * For a stream capable endpoint, start the request only if the
 2533	 * prime irq flag is set.
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002534	 * If the hardware endpoint configuration has not been set yet, then
 2535	 * just queue the request in the deferred list. The transfer will be
 2536	 * started in cdns3_set_hw_configuration.
2537 */
Jayshri Pawar54c4c692019-12-13 06:25:42 +01002538 if (!request->stream_id) {
2539 if (priv_dev->hw_configured_flag &&
2540 !(priv_ep->flags & EP_STALLED) &&
2541 !(priv_ep->flags & EP_STALL_PENDING))
2542 cdns3_start_all_request(priv_dev, priv_ep);
2543 } else {
2544 if (priv_dev->hw_configured_flag && priv_ep->prime_flag)
2545 cdns3_start_all_request(priv_dev, priv_ep);
2546 }
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002547
2548 return 0;
2549}
2550
2551static int cdns3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
2552 gfp_t gfp_flags)
2553{
2554 struct usb_request *zlp_request;
2555 struct cdns3_endpoint *priv_ep;
2556 struct cdns3_device *priv_dev;
2557 unsigned long flags;
2558 int ret;
2559
2560 if (!request || !ep)
2561 return -EINVAL;
2562
2563 priv_ep = ep_to_cdns3_ep(ep);
2564 priv_dev = priv_ep->cdns3_dev;
2565
2566 spin_lock_irqsave(&priv_dev->lock, flags);
2567
2568 ret = __cdns3_gadget_ep_queue(ep, request, gfp_flags);
2569
2570 if (ret == 0 && request->zero && request->length &&
2571 (request->length % ep->maxpacket == 0)) {
2572 struct cdns3_request *priv_req;
2573
2574 zlp_request = cdns3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
2575 zlp_request->buf = priv_dev->zlp_buf;
2576 zlp_request->length = 0;
2577
2578 priv_req = to_cdns3_request(zlp_request);
2579 priv_req->flags |= REQUEST_ZLP;
2580
2581 dev_dbg(priv_dev->dev, "Queuing ZLP for endpoint: %s\n",
2582 priv_ep->name);
2583 ret = __cdns3_gadget_ep_queue(ep, zlp_request, gfp_flags);
2584 }
2585
2586 spin_unlock_irqrestore(&priv_dev->lock, flags);
2587 return ret;
2588}
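
/*
 * Illustrative usage (from a function driver's point of view, not part of
 * this file): a transfer whose length is a multiple of ep->maxpacket
 * (e.g. 512 on a high-speed bulk endpoint) and that should be terminated
 * with a short packet sets req->zero; the code above then queues the
 * extra ZLP request:
 *
 *	req->length = 512;
 *	req->zero = 1;
 *	usb_ep_queue(ep, req, GFP_ATOMIC);
 */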
2589
2590/**
Lee Jones56480a02021-05-26 14:00:20 +01002591 * cdns3_gadget_ep_dequeue - Remove request from transfer queue
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002592 * @ep: endpoint object associated with request
2593 * @request: request object
2594 *
2595 * Returns 0 on success, error code elsewhere
2596 */
2597int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
2598 struct usb_request *request)
2599{
2600 struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
2601 struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
2602 struct usb_request *req, *req_temp;
2603 struct cdns3_request *priv_req;
2604 struct cdns3_trb *link_trb;
Pawel Laszczakf616c3b2019-10-13 10:20:20 +01002605 u8 req_on_hw_ring = 0;
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002606 unsigned long flags;
2607 int ret = 0;
2608
2609 if (!ep || !request || !ep->desc)
2610 return -EINVAL;
2611
2612 spin_lock_irqsave(&priv_dev->lock, flags);
2613
2614 priv_req = to_cdns3_request(request);
2615
2616 trace_cdns3_ep_dequeue(priv_req);
2617
2618 cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);
2619
2620 list_for_each_entry_safe(req, req_temp, &priv_ep->pending_req_list,
2621 list) {
Pawel Laszczakf616c3b2019-10-13 10:20:20 +01002622 if (request == req) {
2623 req_on_hw_ring = 1;
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002624 goto found;
Pawel Laszczakf616c3b2019-10-13 10:20:20 +01002625 }
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002626 }
2627
2628 list_for_each_entry_safe(req, req_temp, &priv_ep->deferred_req_list,
2629 list) {
2630 if (request == req)
2631 goto found;
2632 }
2633
2634 goto not_found;
2635
2636found:
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002637 link_trb = priv_req->trb;
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002638
Pawel Laszczakf616c3b2019-10-13 10:20:20 +01002639	/* Update the ring only if the removed request is on pending_req_list */
Peter Chen95cd7dc2020-04-30 15:07:13 +08002640 if (req_on_hw_ring && link_trb) {
Peter Chen8dafb3c2020-08-21 11:14:37 +08002641 link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma +
2642 ((priv_req->end_trb + 1) * TRB_SIZE)));
2643 link_trb->control = cpu_to_le32((le32_to_cpu(link_trb->control) & TRB_CYCLE) |
2644 TRB_TYPE(TRB_LINK) | TRB_CHAIN);
Pawel Laszczakf616c3b2019-10-13 10:20:20 +01002645
2646 if (priv_ep->wa1_trb == priv_req->trb)
2647 cdns3_wa1_restore_cycle_bit(priv_ep);
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002648 }
2649
Pawel Laszczakf616c3b2019-10-13 10:20:20 +01002650 cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET);
2651
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002652not_found:
2653 spin_unlock_irqrestore(&priv_dev->lock, flags);
2654 return ret;
2655}
2656
2657/**
Lee Jones56480a02021-05-26 14:00:20 +01002658 * __cdns3_gadget_ep_set_halt - Sets stall on selected endpoint
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002659 * Should be called after acquiring spin_lock and selecting ep
Lee Jones4a35aa62020-07-02 15:46:12 +01002660 * @priv_ep: endpoint object to set stall on.
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002661 */
2662void __cdns3_gadget_ep_set_halt(struct cdns3_endpoint *priv_ep)
2663{
2664 struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
2665
2666 trace_cdns3_halt(priv_ep, 1, 0);
2667
2668 if (!(priv_ep->flags & EP_STALLED)) {
2669 u32 ep_sts_reg = readl(&priv_dev->regs->ep_sts);
2670
2671 if (!(ep_sts_reg & EP_STS_DBUSY))
2672 cdns3_ep_stall_flush(priv_ep);
2673 else
2674 priv_ep->flags |= EP_STALL_PENDING;
2675 }
2676}
2677
2678/**
Lee Jones56480a02021-05-26 14:00:20 +01002679 * __cdns3_gadget_ep_clear_halt - Clears stall on selected endpoint
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002680 * Should be called after acquiring spin_lock and selecting ep
Lee Jones4a35aa62020-07-02 15:46:12 +01002681 * @priv_ep: endpoint object to clear stall on
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002682 */
2683int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
2684{
2685 struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
2686 struct usb_request *request;
Peter Chen4bf2dd62020-02-19 22:14:55 +08002687 struct cdns3_request *priv_req;
2688 struct cdns3_trb *trb = NULL;
Colin Ian King04db1d22019-09-02 15:50:35 +01002689 int ret;
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002690 int val;
2691
2692 trace_cdns3_halt(priv_ep, 0, 0);
2693
Peter Chen4bf2dd62020-02-19 22:14:55 +08002694 request = cdns3_next_request(&priv_ep->pending_req_list);
2695 if (request) {
2696 priv_req = to_cdns3_request(request);
2697 trb = priv_req->trb;
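		/* Toggle the cycle bit to hide the TRB from the controller
		 * while the endpoint is reset below.
		 */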
2698 if (trb)
Peter Chen8dafb3c2020-08-21 11:14:37 +08002699 trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE);
Peter Chen4bf2dd62020-02-19 22:14:55 +08002700 }
2701
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002702 writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
2703
2704 /* wait for EPRST cleared */
Colin Ian King04db1d22019-09-02 15:50:35 +01002705 ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
2706 !(val & EP_CMD_EPRST), 1, 100);
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002707 if (ret)
2708 return -EINVAL;
2709
2710 priv_ep->flags &= ~(EP_STALLED | EP_STALL_PENDING);
2711
Peter Chen4bf2dd62020-02-19 22:14:55 +08002712 if (request) {
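		/* Restore the cycle bit toggled above, making the TRB valid again. */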
2713 if (trb)
Peter Chen8dafb3c2020-08-21 11:14:37 +08002714 trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE);
2715
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002716 cdns3_rearm_transfer(priv_ep, 1);
Peter Chen4bf2dd62020-02-19 22:14:55 +08002717 }
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002718
2719 cdns3_start_all_request(priv_dev, priv_ep);
2720 return ret;
2721}
2722
2723/**
Lee Jones56480a02021-05-26 14:00:20 +01002724 * cdns3_gadget_ep_set_halt - Sets/clears stall on selected endpoint
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002725 * @ep: endpoint object to set/clear stall on
2726 * @value: 1 for set stall, 0 for clear stall
2727 *
2728 * Returns 0 on success, error code elsewhere
2729 */
2730int cdns3_gadget_ep_set_halt(struct usb_ep *ep, int value)
2731{
2732 struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
2733 struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
2734 unsigned long flags;
2735 int ret = 0;
2736
2737 if (!(priv_ep->flags & EP_ENABLED))
2738 return -EPERM;
2739
2740 spin_lock_irqsave(&priv_dev->lock, flags);
2741
2742 cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);
2743
2744 if (!value) {
2745 priv_ep->flags &= ~EP_WEDGE;
2746 ret = __cdns3_gadget_ep_clear_halt(priv_ep);
2747 } else {
2748 __cdns3_gadget_ep_set_halt(priv_ep);
2749 }
2750
2751 spin_unlock_irqrestore(&priv_dev->lock, flags);
2752
2753 return ret;
2754}
2755
2756extern const struct usb_ep_ops cdns3_gadget_ep0_ops;
2757
2758static const struct usb_ep_ops cdns3_gadget_ep_ops = {
2759 .enable = cdns3_gadget_ep_enable,
2760 .disable = cdns3_gadget_ep_disable,
2761 .alloc_request = cdns3_gadget_ep_alloc_request,
2762 .free_request = cdns3_gadget_ep_free_request,
2763 .queue = cdns3_gadget_ep_queue,
2764 .dequeue = cdns3_gadget_ep_dequeue,
2765 .set_halt = cdns3_gadget_ep_set_halt,
2766 .set_wedge = cdns3_gadget_ep_set_wedge,
2767};
2768
2769/**
Lee Jones56480a02021-05-26 14:00:20 +01002770 * cdns3_gadget_get_frame - Returns number of actual ITP frame
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002771 * @gadget: gadget object
2772 *
2773 * Returns number of actual ITP frame
2774 */
2775static int cdns3_gadget_get_frame(struct usb_gadget *gadget)
2776{
2777 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2778
2779 return readl(&priv_dev->regs->usb_itpn);
2780}
2781
2782int __cdns3_gadget_wakeup(struct cdns3_device *priv_dev)
2783{
2784 enum usb_device_speed speed;
2785
2786 speed = cdns3_get_speed(priv_dev);
2787
2788 if (speed >= USB_SPEED_SUPER)
2789 return 0;
2790
2791 /* Start driving resume signaling to indicate remote wakeup. */
2792 writel(USB_CONF_LGO_L0, &priv_dev->regs->usb_conf);
2793
2794 return 0;
2795}
2796
2797static int cdns3_gadget_wakeup(struct usb_gadget *gadget)
2798{
2799 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2800 unsigned long flags;
2801 int ret = 0;
2802
2803 spin_lock_irqsave(&priv_dev->lock, flags);
2804 ret = __cdns3_gadget_wakeup(priv_dev);
2805 spin_unlock_irqrestore(&priv_dev->lock, flags);
2806 return ret;
2807}
2808
2809static int cdns3_gadget_set_selfpowered(struct usb_gadget *gadget,
2810 int is_selfpowered)
2811{
2812 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2813 unsigned long flags;
2814
2815 spin_lock_irqsave(&priv_dev->lock, flags);
2816 priv_dev->is_selfpowered = !!is_selfpowered;
2817 spin_unlock_irqrestore(&priv_dev->lock, flags);
2818 return 0;
2819}
2820
2821static int cdns3_gadget_pullup(struct usb_gadget *gadget, int is_on)
2822{
2823 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2824
Peter Chen0eeda052020-09-01 10:33:50 +08002825 if (is_on) {
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002826 writel(USB_CONF_DEVEN, &priv_dev->regs->usb_conf);
Peter Chen0eeda052020-09-01 10:33:50 +08002827 } else {
2828 writel(~0, &priv_dev->regs->ep_ists);
2829 writel(~0, &priv_dev->regs->usb_ists);
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002830 writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);
Peter Chen0eeda052020-09-01 10:33:50 +08002831 }
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002832
2833 return 0;
2834}
2835
2836static void cdns3_gadget_config(struct cdns3_device *priv_dev)
2837{
2838 struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
2839 u32 reg;
2840
2841 cdns3_ep0_config(priv_dev);
2842
2843 /* enable interrupts for endpoint 0 (in and out) */
2844 writel(EP_IEN_EP_OUT0 | EP_IEN_EP_IN0, &regs->ep_ien);
2845
2846 /*
 2847	 * The driver needs to modify the LFPS minimal U1 exit time for the
 2848	 * DEV_VER_TI_V1 revision of the controller.
2849 */
2850 if (priv_dev->dev_ver == DEV_VER_TI_V1) {
2851 reg = readl(&regs->dbg_link1);
2852
2853 reg &= ~DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_MASK;
2854 reg |= DBG_LINK1_LFPS_MIN_GEN_U1_EXIT(0x55) |
2855 DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_SET;
2856 writel(reg, &regs->dbg_link1);
2857 }
2858
2859 /*
 2860	 * By default some platforms have protected access to memory enabled.
 2861	 * This causes problems with the cache, so the driver restores
 2862	 * non-secure access to memory.
2863 */
2864 reg = readl(&regs->dma_axi_ctrl);
2865 reg |= DMA_AXI_CTRL_MARPROT(DMA_AXI_CTRL_NON_SECURE) |
2866 DMA_AXI_CTRL_MAWPROT(DMA_AXI_CTRL_NON_SECURE);
2867 writel(reg, &regs->dma_axi_ctrl);
2868
 2869	/* enable generic interrupt */
2870 writel(USB_IEN_INIT, &regs->usb_ien);
2871 writel(USB_CONF_CLK2OFFDS | USB_CONF_L1DS, &regs->usb_conf);
Peter Chenb5148d92020-09-01 10:33:49 +08002872 /* keep Fast Access bit */
2873 writel(PUSB_PWR_FST_REG_ACCESS, &priv_dev->regs->usb_pwr);
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002874
2875 cdns3_configure_dmult(priv_dev, NULL);
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002876}
2877
2878/**
Lee Jones56480a02021-05-26 14:00:20 +01002879 * cdns3_gadget_udc_start - Gadget start
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002880 * @gadget: gadget object
2881 * @driver: driver which operates on this gadget
2882 *
2883 * Returns 0 on success, error code elsewhere
2884 */
2885static int cdns3_gadget_udc_start(struct usb_gadget *gadget,
2886 struct usb_gadget_driver *driver)
2887{
2888 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2889 unsigned long flags;
Roger Quadros94e259f2019-10-30 14:16:07 +02002890 enum usb_device_speed max_speed = driver->max_speed;
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002891
2892 spin_lock_irqsave(&priv_dev->lock, flags);
2893 priv_dev->gadget_driver = driver;
Roger Quadros94e259f2019-10-30 14:16:07 +02002894
2895 /* limit speed if necessary */
2896 max_speed = min(driver->max_speed, gadget->max_speed);
2897
2898 switch (max_speed) {
2899 case USB_SPEED_FULL:
2900 writel(USB_CONF_SFORCE_FS, &priv_dev->regs->usb_conf);
2901 writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
2902 break;
2903 case USB_SPEED_HIGH:
2904 writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
2905 break;
2906 case USB_SPEED_SUPER:
2907 break;
2908 default:
2909 dev_err(priv_dev->dev,
2910 "invalid maximum_speed parameter %d\n",
2911 max_speed);
Gustavo A. R. Silva0d9b6d42020-07-07 14:56:07 -05002912 fallthrough;
Roger Quadros94e259f2019-10-30 14:16:07 +02002913 case USB_SPEED_UNKNOWN:
2914 /* default to superspeed */
2915 max_speed = USB_SPEED_SUPER;
2916 break;
2917 }
2918
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002919 cdns3_gadget_config(priv_dev);
2920 spin_unlock_irqrestore(&priv_dev->lock, flags);
2921 return 0;
2922}
2923
2924/**
Lee Jones56480a02021-05-26 14:00:20 +01002925 * cdns3_gadget_udc_stop - Stops gadget
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002926 * @gadget: gadget object
2927 *
2928 * Returns 0
2929 */
2930static int cdns3_gadget_udc_stop(struct usb_gadget *gadget)
2931{
2932 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2933 struct cdns3_endpoint *priv_ep;
2934 u32 bEndpointAddress;
2935 struct usb_ep *ep;
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002936 int val;
2937
2938 priv_dev->gadget_driver = NULL;
2939
2940 priv_dev->onchip_used_size = 0;
2941 priv_dev->out_mem_is_allocated = 0;
2942 priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
2943
2944 list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
2945 priv_ep = ep_to_cdns3_ep(ep);
2946 bEndpointAddress = priv_ep->num | priv_ep->dir;
2947 cdns3_select_ep(priv_dev, bEndpointAddress);
2948 writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
2949 readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
2950 !(val & EP_CMD_EPRST), 1, 100);
Sanket Parmarf5c8d292019-10-29 12:24:41 +00002951
2952 priv_ep->flags &= ~EP_CLAIMED;
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002953 }
2954
2955 /* disable interrupt for device */
2956 writel(0, &priv_dev->regs->usb_ien);
Peter Chenb5148d92020-09-01 10:33:49 +08002957 writel(0, &priv_dev->regs->usb_pwr);
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002958 writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);
2959
Xu Wang8e1a2002019-12-20 07:19:38 +00002960 return 0;
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002961}
2962
2963static const struct usb_gadget_ops cdns3_gadget_ops = {
2964 .get_frame = cdns3_gadget_get_frame,
2965 .wakeup = cdns3_gadget_wakeup,
2966 .set_selfpowered = cdns3_gadget_set_selfpowered,
2967 .pullup = cdns3_gadget_pullup,
2968 .udc_start = cdns3_gadget_udc_start,
2969 .udc_stop = cdns3_gadget_udc_stop,
2970 .match_ep = cdns3_gadget_match_ep,
2971};
2972
2973static void cdns3_free_all_eps(struct cdns3_device *priv_dev)
2974{
2975 int i;
2976
 2977	/* ep0 OUT points to ep0 IN. */
2978 priv_dev->eps[16] = NULL;
2979
2980 for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++)
2981 if (priv_dev->eps[i]) {
2982 cdns3_free_trb_pool(priv_dev->eps[i]);
2983 devm_kfree(priv_dev->dev, priv_dev->eps[i]);
2984 }
2985}
2986
2987/**
Lee Jones56480a02021-05-26 14:00:20 +01002988 * cdns3_init_eps - Initializes software endpoints of gadget
Lee Jones4a35aa62020-07-02 15:46:12 +01002989 * @priv_dev: extended gadget object
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01002990 *
2991 * Returns 0 on success, error code elsewhere
2992 */
2993static int cdns3_init_eps(struct cdns3_device *priv_dev)
2994{
2995 u32 ep_enabled_reg, iso_ep_reg;
2996 struct cdns3_endpoint *priv_ep;
2997 int ep_dir, ep_number;
2998 u32 ep_mask;
2999 int ret = 0;
3000 int i;
3001
3002 /* Read it from USB_CAP3 to USB_CAP5 */
3003 ep_enabled_reg = readl(&priv_dev->regs->usb_cap3);
3004 iso_ep_reg = readl(&priv_dev->regs->usb_cap4);
3005
3006 dev_dbg(priv_dev->dev, "Initializing non-zero endpoints\n");
3007
3008 for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) {
3009 ep_dir = i >> 4; /* i div 16 */
3010 ep_number = i & 0xF; /* i % 16 */
3011 ep_mask = BIT(i);
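		/*
		 * e.g. i = 1 -> ep1out (ep_dir = 0, ep_number = 1);
		 * i = 17 -> ep1in (ep_dir = 1, ep_number = 1).
		 */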
3012
3013 if (!(ep_enabled_reg & ep_mask))
3014 continue;
3015
3016 if (ep_dir && !ep_number) {
3017 priv_dev->eps[i] = priv_dev->eps[0];
3018 continue;
3019 }
3020
3021 priv_ep = devm_kzalloc(priv_dev->dev, sizeof(*priv_ep),
3022 GFP_KERNEL);
Colin Ian King4d2233e2019-09-02 19:43:34 +01003023 if (!priv_ep)
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01003024 goto err;
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01003025
3026 /* set parent of endpoint object */
3027 priv_ep->cdns3_dev = priv_dev;
3028 priv_dev->eps[i] = priv_ep;
3029 priv_ep->num = ep_number;
3030 priv_ep->dir = ep_dir ? USB_DIR_IN : USB_DIR_OUT;
3031
3032 if (!ep_number) {
3033 ret = cdns3_init_ep0(priv_dev, priv_ep);
3034 if (ret) {
3035 dev_err(priv_dev->dev, "Failed to init ep0\n");
3036 goto err;
3037 }
3038 } else {
3039 snprintf(priv_ep->name, sizeof(priv_ep->name), "ep%d%s",
3040 ep_number, !!ep_dir ? "in" : "out");
3041 priv_ep->endpoint.name = priv_ep->name;
3042
3043 usb_ep_set_maxpacket_limit(&priv_ep->endpoint,
3044 CDNS3_EP_MAX_PACKET_LIMIT);
3045 priv_ep->endpoint.max_streams = CDNS3_EP_MAX_STREAMS;
3046 priv_ep->endpoint.ops = &cdns3_gadget_ep_ops;
3047 if (ep_dir)
3048 priv_ep->endpoint.caps.dir_in = 1;
3049 else
3050 priv_ep->endpoint.caps.dir_out = 1;
3051
3052 if (iso_ep_reg & ep_mask)
3053 priv_ep->endpoint.caps.type_iso = 1;
3054
3055 priv_ep->endpoint.caps.type_bulk = 1;
3056 priv_ep->endpoint.caps.type_int = 1;
3057
3058 list_add_tail(&priv_ep->endpoint.ep_list,
3059 &priv_dev->gadget.ep_list);
3060 }
3061
3062 priv_ep->flags = 0;
3063
Peter Cheneed6ed62020-03-31 16:10:05 +08003064 dev_dbg(priv_dev->dev, "Initialized %s support: %s %s\n",
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01003065 priv_ep->name,
3066 priv_ep->endpoint.caps.type_bulk ? "BULK, INT" : "",
3067 priv_ep->endpoint.caps.type_iso ? "ISO" : "");
3068
3069 INIT_LIST_HEAD(&priv_ep->pending_req_list);
3070 INIT_LIST_HEAD(&priv_ep->deferred_req_list);
Pawel Laszczak6bbf87a2019-08-26 12:19:31 +01003071 INIT_LIST_HEAD(&priv_ep->wa2_descmiss_req_list);
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01003072 }
3073
3074 return 0;
3075err:
3076 cdns3_free_all_eps(priv_dev);
3077 return -ENOMEM;
3078}
3079
Peter Chen6b777892020-08-21 10:55:47 +08003080static void cdns3_gadget_release(struct device *dev)
3081{
3082 struct cdns3_device *priv_dev = container_of(dev,
3083 struct cdns3_device, gadget.dev);
3084
3085 kfree(priv_dev);
3086}
3087
Pawel Laszczak0b490042020-12-07 11:32:21 +01003088static void cdns3_gadget_exit(struct cdns *cdns)
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01003089{
3090 struct cdns3_device *priv_dev;
3091
3092 priv_dev = cdns->gadget_dev;
3093
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01003094
3095 pm_runtime_mark_last_busy(cdns->dev);
3096 pm_runtime_put_autosuspend(cdns->dev);
3097
Peter Chen6b777892020-08-21 10:55:47 +08003098 usb_del_gadget(&priv_dev->gadget);
Peter Chen98df91f2020-09-01 10:35:49 +08003099 devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev);
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01003100
3101 cdns3_free_all_eps(priv_dev);
3102
3103 while (!list_empty(&priv_dev->aligned_buf_list)) {
3104 struct cdns3_aligned_buf *buf;
3105
3106 buf = cdns3_next_align_buf(&priv_dev->aligned_buf_list);
Sanket Parmar8430e982021-03-22 11:26:30 +01003107 dma_free_noncoherent(priv_dev->sysdev, buf->size,
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01003108 buf->buf,
Sanket Parmar8430e982021-03-22 11:26:30 +01003109 buf->dma,
3110 buf->dir);
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01003111
3112 list_del(&buf->list);
3113 kfree(buf);
3114 }
3115
3116 dma_free_coherent(priv_dev->sysdev, 8, priv_dev->setup_buf,
3117 priv_dev->setup_dma);
Sanket Parmarb9b1eae2021-03-09 06:19:39 +01003118 dma_pool_destroy(priv_dev->eps_dma_pool);
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01003119
3120 kfree(priv_dev->zlp_buf);
Peter Chen6b777892020-08-21 10:55:47 +08003121 usb_put_gadget(&priv_dev->gadget);
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01003122 cdns->gadget_dev = NULL;
Pawel Laszczak0b490042020-12-07 11:32:21 +01003123 cdns_drd_gadget_off(cdns);
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01003124}
3125
Pawel Laszczak0b490042020-12-07 11:32:21 +01003126static int cdns3_gadget_start(struct cdns *cdns)
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01003127{
3128 struct cdns3_device *priv_dev;
3129 u32 max_speed;
3130 int ret;
3131
3132 priv_dev = kzalloc(sizeof(*priv_dev), GFP_KERNEL);
3133 if (!priv_dev)
3134 return -ENOMEM;
3135
Peter Chen6b777892020-08-21 10:55:47 +08003136 usb_initialize_gadget(cdns->dev, &priv_dev->gadget,
3137 cdns3_gadget_release);
Pawel Laszczak7733f6c2019-08-26 12:19:30 +01003138 cdns->gadget_dev = priv_dev;
3139 priv_dev->sysdev = cdns->dev;
3140 priv_dev->dev = cdns->dev;
3141 priv_dev->regs = cdns->dev_regs;
3142
	device_property_read_u16(priv_dev->dev, "cdns,on-chip-buff-size",
				 &priv_dev->onchip_buffers);

	if (priv_dev->onchip_buffers <= 0) {
		u32 reg = readl(&priv_dev->regs->usb_cap2);

		priv_dev->onchip_buffers = USB_CAP2_ACTUAL_MEM_SIZE(reg);
	}

	if (!priv_dev->onchip_buffers)
		priv_dev->onchip_buffers = 256;

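	/*
	 * usb_get_maximum_speed() parses the standard "maximum-speed" device
	 * property (e.g. maximum-speed = "super-speed" in the device tree)
	 * and returns USB_SPEED_UNKNOWN when the property is absent.
	 */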
	max_speed = usb_get_maximum_speed(cdns->dev);

	/* Check the maximum_speed parameter */
	switch (max_speed) {
	case USB_SPEED_FULL:
	case USB_SPEED_HIGH:
	case USB_SPEED_SUPER:
		break;
	default:
		dev_err(cdns->dev, "invalid maximum_speed parameter %d\n",
			max_speed);
		fallthrough;
	case USB_SPEED_UNKNOWN:
		/* default to superspeed */
		max_speed = USB_SPEED_SUPER;
		break;
	}

	/* fill gadget fields */
	priv_dev->gadget.max_speed = max_speed;
	priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
	priv_dev->gadget.ops = &cdns3_gadget_ops;
	priv_dev->gadget.name = "usb-ss-gadget";
	priv_dev->gadget.quirk_avoids_skb_reserve = 1;
	priv_dev->gadget.irq = cdns->dev_irq;

	spin_lock_init(&priv_dev->lock);
	INIT_WORK(&priv_dev->pending_status_wq,
		  cdns3_pending_setup_status_handler);

	INIT_WORK(&priv_dev->aligned_buf_wq,
		  cdns3_free_aligned_request_buf);

	/* initialize endpoint container */
	INIT_LIST_HEAD(&priv_dev->gadget.ep_list);
	INIT_LIST_HEAD(&priv_dev->aligned_buf_list);
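	/*
	 * Per-endpoint TRB rings are allocated from this DMA pool; each
	 * allocation is TRB_RING_SIZE bytes with 8-byte alignment.
	 */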
	priv_dev->eps_dma_pool = dma_pool_create("cdns3_eps_dma_pool",
						 priv_dev->sysdev,
						 TRB_RING_SIZE, 8, 0);
	if (!priv_dev->eps_dma_pool) {
		dev_err(priv_dev->dev, "Failed to create TRB dma pool\n");
		ret = -ENOMEM;
		goto err1;
	}

	ret = cdns3_init_eps(priv_dev);
	if (ret) {
		dev_err(priv_dev->dev, "Failed to create endpoints\n");
		goto err1;
	}

	/* allocate coherent memory for the 8-byte SETUP packet buffer */
	priv_dev->setup_buf = dma_alloc_coherent(priv_dev->sysdev, 8,
						 &priv_dev->setup_dma, GFP_DMA);
	if (!priv_dev->setup_buf) {
		ret = -ENOMEM;
		goto err2;
	}

	priv_dev->dev_ver = readl(&priv_dev->regs->usb_cap6);

	dev_dbg(priv_dev->dev, "Device Controller version: %08x\n",
		readl(&priv_dev->regs->usb_cap6));
	dev_dbg(priv_dev->dev, "USB Capabilities: %08x\n",
		readl(&priv_dev->regs->usb_cap1));
	dev_dbg(priv_dev->dev, "On-Chip memory configuration: %08x\n",
		readl(&priv_dev->regs->usb_cap2));

	priv_dev->dev_ver = GET_DEV_BASE_VERSION(priv_dev->dev_ver);
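	/*
	 * Scatter-gather transfers are only advertised on DEV_VER_V2 and
	 * newer revisions of the controller.
	 */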
	if (priv_dev->dev_ver >= DEV_VER_V2)
		priv_dev->gadget.sg_supported = 1;

	priv_dev->zlp_buf = kzalloc(CDNS3_EP_ZLP_BUF_SIZE, GFP_KERNEL);
	if (!priv_dev->zlp_buf) {
		ret = -ENOMEM;
		goto err3;
	}

	/* add USB gadget device */
	ret = usb_add_gadget(&priv_dev->gadget);
	if (ret < 0) {
		dev_err(priv_dev->dev, "Failed to add gadget\n");
		goto err4;
	}

	return 0;
err4:
	kfree(priv_dev->zlp_buf);
err3:
	dma_free_coherent(priv_dev->sysdev, 8, priv_dev->setup_buf,
			  priv_dev->setup_dma);
err2:
	cdns3_free_all_eps(priv_dev);
err1:
	dma_pool_destroy(priv_dev->eps_dma_pool);

	usb_put_gadget(&priv_dev->gadget);
	cdns->gadget_dev = NULL;
	return ret;
}

static int __cdns3_gadget_init(struct cdns *cdns)
{
	int ret = 0;

	/* Ensure 32-bit DMA Mask in case we switched back from Host mode */
	ret = dma_set_mask_and_coherent(cdns->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(cdns->dev, "Failed to set dma mask: %d\n", ret);
		return ret;
	}

	cdns_drd_gadget_on(cdns);
	pm_runtime_get_sync(cdns->dev);

	ret = cdns3_gadget_start(cdns);
	if (ret)
		return ret;

	/*
	 * Because the interrupt line can be shared with other components in
	 * the driver, it can't use the IRQF_ONESHOT flag here.
	 */
	ret = devm_request_threaded_irq(cdns->dev, cdns->dev_irq,
					cdns3_device_irq_handler,
					cdns3_device_thread_irq_handler,
					IRQF_SHARED, dev_name(cdns->dev),
					cdns->gadget_dev);
	if (ret)
		goto err0;

	return 0;
err0:
	cdns3_gadget_exit(cdns);
	return ret;
}

static int cdns3_gadget_suspend(struct cdns *cdns, bool do_wakeup)
__must_hold(&cdns->lock)
{
	struct cdns3_device *priv_dev = cdns->gadget_dev;

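	/*
	 * Temporarily drop the core lock around the disconnect notification:
	 * the gadget driver's ->disconnect() callback may call back into
	 * this driver (for example to dequeue requests) and must not
	 * deadlock on cdns->lock.
	 */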
	spin_unlock(&cdns->lock);
	cdns3_disconnect_gadget(priv_dev);
	spin_lock(&cdns->lock);

	priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
	usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED);
	cdns3_hw_reset_eps_config(priv_dev);

	/* disable interrupt for device */
	writel(0, &priv_dev->regs->usb_ien);

	return 0;
}

static int cdns3_gadget_resume(struct cdns *cdns, bool hibernated)
{
	struct cdns3_device *priv_dev = cdns->gadget_dev;

	if (!priv_dev->gadget_driver)
		return 0;

	cdns3_gadget_config(priv_dev);
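	/*
	 * When coming back from hibernation, re-enable the device side of
	 * the controller explicitly by setting USB_CONF_DEVEN.
	 */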
	if (hibernated)
		writel(USB_CONF_DEVEN, &priv_dev->regs->usb_conf);

	return 0;
}

/**
 * cdns3_gadget_init - initialize device structure
 *
 * @cdns: cdns instance
 *
 * Sets up the device-role driver (start/stop/suspend/resume callbacks)
 * and registers it with the cdns core.
 */
int cdns3_gadget_init(struct cdns *cdns)
{
	struct cdns_role_driver *rdrv;

	rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
	if (!rdrv)
		return -ENOMEM;

	rdrv->start	= __cdns3_gadget_init;
	rdrv->stop	= cdns3_gadget_exit;
	rdrv->suspend	= cdns3_gadget_suspend;
	rdrv->resume	= cdns3_gadget_resume;
	rdrv->state	= CDNS_ROLE_STATE_INACTIVE;
	rdrv->name	= "gadget";
	cdns->roles[USB_ROLE_DEVICE] = rdrv;

	return 0;
}
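
/*
 * Note: the cdns core is expected to invoke rdrv->start/->stop when the
 * role mux switches the controller into or out of device mode, and
 * rdrv->suspend/->resume on power-management transitions while the device
 * role is active.
 */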