/*
 * USB Peripheral Controller driver for Aeroflex Gaisler GRUSBDC.
 *
 * 2013 (c) Aeroflex Gaisler AB
 *
 * This driver supports GRUSBDC USB Device Controller cores available in the
 * GRLIB VHDL IP core library.
 *
 * Full documentation of the GRUSBDC core can be found here:
 * http://www.gaisler.com/products/grlib/grip.pdf
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Contributors:
 * - Andreas Larsson <andreas@gaisler.com>
 * - Marko Isomaki
 */

/*
 * A GRUSBDC core can have up to 16 IN endpoints and 16 OUT endpoints, each
 * individually configurable to any of the four USB transfer types. This driver
 * only supports cores in DMA mode.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>

#include <asm/byteorder.h>

#include "gr_udc.h"

#define DRIVER_NAME	"gr_udc"
#define DRIVER_DESC	"Aeroflex Gaisler GRUSBDC USB Peripheral Controller"

static const char driver_name[] = DRIVER_NAME;
static const char driver_desc[] = DRIVER_DESC;
#define gr_read32(x) (ioread32be((x)))
#define gr_write32(x, v) (iowrite32be((v), (x)))

/* USB speed and corresponding string calculated from status register value */
#define GR_SPEED(status) \
	((status & GR_STATUS_SP) ? USB_SPEED_FULL : USB_SPEED_HIGH)
#define GR_SPEED_STR(status) usb_speed_string(GR_SPEED(status))

/* Size of hardware buffer calculated from epctrl register value */
#define GR_BUFFER_SIZE(epctrl) \
	((((epctrl) & GR_EPCTRL_BUFSZ_MASK) >> GR_EPCTRL_BUFSZ_POS) * \
	 GR_EPCTRL_BUFSZ_SCALER)

/* ---------------------------------------------------------------------- */
/* Debug printout functionality */

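/* Indexed by the transfer type (TT) field of the epctrl register */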
static const char * const gr_modestring[] = {"control", "iso", "bulk", "int"};

static const char *gr_ep0state_string(enum gr_ep0state state)
{
	static const char *const names[] = {
		[GR_EP0_DISCONNECT] = "disconnect",
		[GR_EP0_SETUP] = "setup",
		[GR_EP0_IDATA] = "idata",
		[GR_EP0_ODATA] = "odata",
		[GR_EP0_ISTATUS] = "istatus",
		[GR_EP0_OSTATUS] = "ostatus",
		[GR_EP0_STALL] = "stall",
		[GR_EP0_SUSPEND] = "suspend",
	};

	if (state < 0 || state >= ARRAY_SIZE(names))
		return "UNKNOWN";

	return names[state];
}

#ifdef VERBOSE_DEBUG

static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
				struct gr_request *req)
{
	int buflen = ep->is_in ? req->req.length : req->req.actual;
	int rowlen = 32;
	int plen = min(rowlen, buflen);

	dev_dbg(ep->dev->dev, "%s: 0x%p, %d bytes data%s:\n", str, req, buflen,
		(buflen > plen ? " (truncated)" : ""));
	print_hex_dump_debug("   ", DUMP_PREFIX_NONE,
			     rowlen, 4, req->req.buf, plen, false);
}

static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index, u16 length)
{
	dev_vdbg(dev->dev, "REQ: %02x.%02x v%04x i%04x l%04x\n",
		 type, request, value, index, length);
}
#else /* !VERBOSE_DEBUG */

static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
				struct gr_request *req) {}

static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index, u16 length) {}

#endif /* VERBOSE_DEBUG */

/* ---------------------------------------------------------------------- */
/* Debugfs functionality */

#ifdef CONFIG_USB_GADGET_DEBUG_FS

static void gr_seq_ep_show(struct seq_file *seq, struct gr_ep *ep)
{
	u32 epctrl = gr_read32(&ep->regs->epctrl);
	u32 epstat = gr_read32(&ep->regs->epstat);
	int mode = (epctrl & GR_EPCTRL_TT_MASK) >> GR_EPCTRL_TT_POS;
	struct gr_request *req;

	seq_printf(seq, "%s:\n", ep->ep.name);
	seq_printf(seq, "  mode = %s\n", gr_modestring[mode]);
	seq_printf(seq, "  halted: %d\n", !!(epctrl & GR_EPCTRL_EH));
	seq_printf(seq, "  disabled: %d\n", !!(epctrl & GR_EPCTRL_ED));
	seq_printf(seq, "  valid: %d\n", !!(epctrl & GR_EPCTRL_EV));
	seq_printf(seq, "  dma_start = %d\n", ep->dma_start);
	seq_printf(seq, "  stopped = %d\n", ep->stopped);
	seq_printf(seq, "  wedged = %d\n", ep->wedged);
	seq_printf(seq, "  callback = %d\n", ep->callback);
	seq_printf(seq, "  maxpacket = %d\n", ep->ep.maxpacket);
	seq_printf(seq, "  bytes_per_buffer = %d\n", ep->bytes_per_buffer);
	if (mode == 1 || mode == 3)
		seq_printf(seq, "  nt = %d\n",
			   (epctrl & GR_EPCTRL_NT_MASK) >> GR_EPCTRL_NT_POS);

	seq_printf(seq, "  Buffer 0: %s %s%d\n",
		   epstat & GR_EPSTAT_B0 ? "valid" : "invalid",
		   epstat & GR_EPSTAT_BS ? " " : "selected ",
		   (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS);
	seq_printf(seq, "  Buffer 1: %s %s%d\n",
		   epstat & GR_EPSTAT_B1 ? "valid" : "invalid",
		   epstat & GR_EPSTAT_BS ? "selected " : " ",
		   (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS);

	if (list_empty(&ep->queue)) {
		seq_puts(seq, "  Queue: empty\n\n");
		return;
	}

	seq_puts(seq, "  Queue:\n");
	list_for_each_entry(req, &ep->queue, queue) {
		struct gr_dma_desc *desc;
		struct gr_dma_desc *next;

		seq_printf(seq, "    0x%p: 0x%p %d %d\n", req,
			   req->req.buf, req->req.actual, req->req.length);

		next = req->first_desc;
		do {
			desc = next;
			next = desc->next_desc;
			seq_printf(seq, "      %c 0x%p (0x%08x): 0x%05x 0x%08x\n",
				   desc == req->curr_desc ? 'c' : ' ',
				   desc, desc->paddr, desc->ctrl, desc->data);
		} while (desc != req->last_desc);
	}
	seq_puts(seq, "\n");
}

static int gr_seq_show(struct seq_file *seq, void *v)
{
	struct gr_udc *dev = seq->private;
	u32 control = gr_read32(&dev->regs->control);
	u32 status = gr_read32(&dev->regs->status);
	struct gr_ep *ep;

	seq_printf(seq, "usb state = %s\n",
		   usb_state_string(dev->gadget.state));
	seq_printf(seq, "address = %d\n",
		   (control & GR_CONTROL_UA_MASK) >> GR_CONTROL_UA_POS);
	seq_printf(seq, "speed = %s\n", GR_SPEED_STR(status));
	seq_printf(seq, "ep0state = %s\n", gr_ep0state_string(dev->ep0state));
	seq_printf(seq, "irq_enabled = %d\n", dev->irq_enabled);
	seq_printf(seq, "remote_wakeup = %d\n", dev->remote_wakeup);
	seq_printf(seq, "test_mode = %d\n", dev->test_mode);
	seq_puts(seq, "\n");

	list_for_each_entry(ep, &dev->ep_list, ep_list)
		gr_seq_ep_show(seq, ep);

	return 0;
}

static int gr_dfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, gr_seq_show, inode->i_private);
}

static const struct file_operations gr_dfs_fops = {
	.owner		= THIS_MODULE,
	.open		= gr_dfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void gr_dfs_create(struct gr_udc *dev)
{
	const char *name = "gr_udc_state";

	dev->dfs_root = debugfs_create_dir(dev_name(dev->dev), NULL);
	if (IS_ERR(dev->dfs_root)) {
		dev_err(dev->dev, "Failed to create debugfs directory\n");
		return;
	}
	dev->dfs_state = debugfs_create_file(name, 0444, dev->dfs_root,
					     dev, &gr_dfs_fops);
	if (IS_ERR(dev->dfs_state))
		dev_err(dev->dev, "Failed to create debugfs file %s\n", name);
}

static void gr_dfs_delete(struct gr_udc *dev)
{
	/* Handles NULL and ERR pointers internally */
	debugfs_remove(dev->dfs_state);
	debugfs_remove(dev->dfs_root);
}

#else /* !CONFIG_USB_GADGET_DEBUG_FS */

static void gr_dfs_create(struct gr_udc *dev) {}
static void gr_dfs_delete(struct gr_udc *dev) {}

#endif /* CONFIG_USB_GADGET_DEBUG_FS */

/* ---------------------------------------------------------------------- */
/* DMA and request handling */

/* Allocates a new struct gr_dma_desc, sets paddr and zeroes the rest */
static struct gr_dma_desc *gr_alloc_dma_desc(struct gr_ep *ep, gfp_t gfp_flags)
{
	dma_addr_t paddr;
	struct gr_dma_desc *dma_desc;

	dma_desc = dma_pool_alloc(ep->dev->desc_pool, gfp_flags, &paddr);
	if (!dma_desc) {
		dev_err(ep->dev->dev, "Could not allocate from DMA pool\n");
		return NULL;
	}

	memset(dma_desc, 0, sizeof(*dma_desc));
	dma_desc->paddr = paddr;

	return dma_desc;
}

static inline void gr_free_dma_desc(struct gr_udc *dev,
				    struct gr_dma_desc *desc)
{
	dma_pool_free(dev->desc_pool, desc, (dma_addr_t)desc->paddr);
}

/* Frees the chain of struct gr_dma_desc for the given request */
static void gr_free_dma_desc_chain(struct gr_udc *dev, struct gr_request *req)
{
	struct gr_dma_desc *desc;
	struct gr_dma_desc *next;

	next = req->first_desc;
	if (!next)
		return;

	do {
		desc = next;
		next = desc->next_desc;
		gr_free_dma_desc(dev, desc);
	} while (desc != req->last_desc);

	req->first_desc = NULL;
	req->curr_desc = NULL;
	req->last_desc = NULL;
}

static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req);

/*
 * Frees allocated resources and calls the appropriate completion function or
 * setup packet handler for a finished request.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_finish_request(struct gr_ep *ep, struct gr_request *req,
			      int status)
	__releases(&dev->lock)
	__acquires(&dev->lock)
{
	struct gr_udc *dev;

	list_del_init(&req->queue);

	if (likely(req->req.status == -EINPROGRESS))
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
	gr_free_dma_desc_chain(dev, req);

	if (ep->is_in) /* For OUT, actual gets updated bit by bit */
		req->req.actual = req->req.length;

	if (!status) {
		if (ep->is_in)
			gr_dbgprint_request("SENT", ep, req);
		else
			gr_dbgprint_request("RECV", ep, req);
	}

	/* Prevent changes to ep->queue during callback */
	ep->callback = 1;
	if (req == dev->ep0reqo && !status) {
		if (req->setup)
			gr_ep0_setup(dev, req);
		else
			dev_err(dev->dev,
				"Unexpected non-setup packet on ep0out\n");
	} else if (req->req.complete) {
		spin_unlock(&dev->lock);

		req->req.complete(&ep->ep, &req->req);

		spin_lock(&dev->lock);
	}
	ep->callback = 0;
}

static struct usb_request *gr_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct gr_request *req;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}

/*
 * Starts DMA for endpoint ep if there are requests in the queue.
 *
 * Must be called with dev->lock held and with !ep->stopped.
 */
static void gr_start_dma(struct gr_ep *ep)
{
	struct gr_request *req;
	u32 dmactrl;

	if (list_empty(&ep->queue)) {
		ep->dma_start = 0;
		return;
	}

	req = list_first_entry(&ep->queue, struct gr_request, queue);

	/* A descriptor should already have been allocated */
	BUG_ON(!req->curr_desc);

	wmb(); /* Make sure all is settled before handing it over to DMA */

	/* Set the descriptor pointer in the hardware */
	gr_write32(&ep->regs->dmaaddr, req->curr_desc->paddr);

	/* Announce available descriptors */
	dmactrl = gr_read32(&ep->regs->dmactrl);
	gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_DA);

	ep->dma_start = 1;
}

/*
 * Finishes the first request in the ep's queue and, if available, starts the
 * next request in queue.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static void gr_dma_advance(struct gr_ep *ep, int status)
{
	struct gr_request *req;

	req = list_first_entry(&ep->queue, struct gr_request, queue);
	gr_finish_request(ep, req, status);
	gr_start_dma(ep); /* Regardless of ep->dma_start */
}

/*
 * Abort DMA for an endpoint. Sets the abort DMA bit which causes an ongoing DMA
 * transfer to be canceled and clears GR_DMACTRL_DA.
 *
 * Must be called with dev->lock held.
 */
static void gr_abort_dma(struct gr_ep *ep)
{
	u32 dmactrl;

	dmactrl = gr_read32(&ep->regs->dmactrl);
	gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_AD);
}

/*
 * Allocates and sets up a struct gr_dma_desc and puts it on the descriptor
 * chain.
 *
 * Size is not used for OUT endpoints. The hardware cannot be instructed to
 * handle a smaller buffer than MAXPL in the OUT direction.
 */
static int gr_add_dma_desc(struct gr_ep *ep, struct gr_request *req,
			   dma_addr_t data, unsigned size, gfp_t gfp_flags)
{
	struct gr_dma_desc *desc;

	desc = gr_alloc_dma_desc(ep, gfp_flags);
	if (!desc)
		return -ENOMEM;

	desc->data = data;
	if (ep->is_in)
		desc->ctrl =
			(GR_DESC_IN_CTRL_LEN_MASK & size) | GR_DESC_IN_CTRL_EN;
	else
		desc->ctrl = GR_DESC_OUT_CTRL_IE;

	if (!req->first_desc) {
		req->first_desc = desc;
		req->curr_desc = desc;
	} else {
		req->last_desc->next_desc = desc;
		req->last_desc->next = desc->paddr;
		req->last_desc->ctrl |= GR_DESC_OUT_CTRL_NX;
	}
	req->last_desc = desc;

	return 0;
}

/*
 * Sets up a chain of struct gr_dma_desc descriptors pointing to buffers that
 * together cover req->req.length bytes of the buffer at DMA address
 * req->req.dma for the OUT direction.
 *
 * The first descriptor in the chain is enabled, the rest disabled. The
 * interrupt handler will later enable them one by one when needed so we can
 * find out when the transfer is finished. For OUT endpoints, all descriptors
 * therefore generate interrupts.
 */
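/*
 * Example (illustrative): a 1024-byte OUT request on an endpoint with
 * bytes_per_buffer == 512 yields two 512-byte descriptors, of which only the
 * first is initially enabled.
 */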
static int gr_setup_out_desc_list(struct gr_ep *ep, struct gr_request *req,
				  gfp_t gfp_flags)
{
	u16 bytes_left; /* Bytes left to provide descriptors for */
	u16 bytes_used; /* Bytes accommodated for */
	int ret = 0;

	req->first_desc = NULL; /* Signals that no allocation is done yet */
	bytes_left = req->req.length;
	bytes_used = 0;
	while (bytes_left > 0) {
		dma_addr_t start = req->req.dma + bytes_used;
		u16 size = min(bytes_left, ep->bytes_per_buffer);

		/* Should not happen - gr_queue rejects such lengths */
		if (size < ep->bytes_per_buffer)
			dev_warn(ep->dev->dev,
				 "Buffer overrun risk: %u < %u bytes/buffer\n",
				 size, ep->bytes_per_buffer);

		ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
		if (ret)
			goto alloc_err;

		bytes_left -= size;
		bytes_used += size;
	}

	req->first_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

	return 0;

alloc_err:
	gr_free_dma_desc_chain(ep->dev, req);

	return ret;
}

/*
 * Sets up a chain of struct gr_dma_desc descriptors pointing to buffers that
 * together cover req->req.length bytes of the buffer at DMA address
 * req->req.dma for the IN direction.
 *
 * When more data is provided than the maximum payload size, the hardware
 * splits this up into several payloads automatically. Moreover,
 * ep->bytes_per_buffer is always set to a multiple of the maximum payload
 * (restricted to the valid number of maximum payloads during high bandwidth
 * isochronous or interrupt transfers).
 *
 * All descriptors are enabled from the beginning and we only generate an
 * interrupt for the last one, indicating that the entire request has been
 * pushed to hardware.
 */
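/*
 * Example (illustrative): a 3000-byte IN request with bytes_per_buffer == 1024
 * is split into descriptors of 1024, 1024 and 952 bytes, with
 * GR_DESC_IN_CTRL_PI set only on the last one.
 */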
static int gr_setup_in_desc_list(struct gr_ep *ep, struct gr_request *req,
				 gfp_t gfp_flags)
{
	u16 bytes_left; /* Bytes left in req to provide descriptors for */
	u16 bytes_used; /* Bytes in req accommodated for */
	int ret = 0;

	req->first_desc = NULL; /* Signals that no allocation is done yet */
	bytes_left = req->req.length;
	bytes_used = 0;
	do { /* Allow for zero length packets */
		dma_addr_t start = req->req.dma + bytes_used;
		u16 size = min(bytes_left, ep->bytes_per_buffer);

		ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
		if (ret)
			goto alloc_err;

		bytes_left -= size;
		bytes_used += size;
	} while (bytes_left > 0);

	/*
	 * Send an extra zero length packet to indicate that no more data is
	 * available when req->req.zero is set and the data length is an even
	 * multiple of ep->ep.maxpacket.
	 */
	if (req->req.zero && (req->req.length % ep->ep.maxpacket == 0)) {
		ret = gr_add_dma_desc(ep, req, 0, 0, gfp_flags);
		if (ret)
			goto alloc_err;
	}

	/*
	 * For IN packets we only want to know when the last packet has been
	 * transmitted (not just put into internal buffers).
	 */
	req->last_desc->ctrl |= GR_DESC_IN_CTRL_PI;

	return 0;

alloc_err:
	gr_free_dma_desc_chain(ep->dev, req);

	return ret;
}

/* Must be called with dev->lock held */
static int gr_queue(struct gr_ep *ep, struct gr_request *req, gfp_t gfp_flags)
{
	struct gr_udc *dev = ep->dev;
	int ret;

	if (unlikely(!ep->ep.desc && ep->num != 0)) {
		dev_err(dev->dev, "No ep descriptor for %s\n", ep->ep.name);
		return -EINVAL;
	}

	if (unlikely(!req->req.buf || !list_empty(&req->queue))) {
		dev_err(dev->dev,
			"Invalid request for %s: buf=%p list_empty=%d\n",
			ep->ep.name, req->req.buf, list_empty(&req->queue));
		return -EINVAL;
	}

	/*
	 * The DMA controller cannot handle smaller OUT buffers than
	 * maxpacket. It could lead to buffer overruns if unexpectedly long
	 * packets are received.
	 */
	if (!ep->is_in && (req->req.length % ep->ep.maxpacket) != 0) {
		dev_err(dev->dev,
			"OUT request length %d is not a multiple of maxpacket\n",
			req->req.length);
		return -EMSGSIZE;
	}

	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
		dev_err(dev->dev, "Request queued on inactive gadget\n");
		return -ESHUTDOWN;
	}

	/* Can't touch registers when suspended */
	if (dev->ep0state == GR_EP0_SUSPEND) {
		dev_err(dev->dev, "Request queued while suspended\n");
		return -EBUSY;
	}

	/* Set up DMA mapping in case the caller didn't */
	ret = usb_gadget_map_request(&dev->gadget, &req->req, ep->is_in);
	if (ret) {
		dev_err(dev->dev, "usb_gadget_map_request failed: %d\n", ret);
		return ret;
	}

	if (ep->is_in)
		ret = gr_setup_in_desc_list(ep, req, gfp_flags);
	else
		ret = gr_setup_out_desc_list(ep, req, gfp_flags);
	if (ret)
		return ret;

	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	list_add_tail(&req->queue, &ep->queue);

	/* Start DMA if not started, otherwise interrupt handler handles it */
	if (!ep->dma_start && likely(!ep->stopped))
		gr_start_dma(ep);

	return 0;
}

/*
 * Queue a request from within the driver.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_queue_int(struct gr_ep *ep, struct gr_request *req,
			       gfp_t gfp_flags)
{
	if (ep->is_in)
		gr_dbgprint_request("RESP", ep, req);

	return gr_queue(ep, req, gfp_flags);
}

/* ---------------------------------------------------------------------- */
/* General helper functions */

/*
 * Dequeue ALL requests.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_ep_nuke(struct gr_ep *ep)
{
	struct gr_request *req;
	struct gr_udc *dev;

	dev = ep->dev;

	ep->stopped = 1;
	ep->dma_start = 0;
	gr_abort_dma(ep);

	while (!list_empty(&ep->queue)) {
		req = list_first_entry(&ep->queue, struct gr_request, queue);
		gr_finish_request(ep, req, -ESHUTDOWN);
	}
}

/*
 * Reset the hardware state of this endpoint.
 *
 * Must be called with dev->lock held.
 */
static void gr_ep_reset(struct gr_ep *ep)
{
	gr_write32(&ep->regs->epctrl, 0);
	gr_write32(&ep->regs->dmactrl, 0);

	ep->ep.maxpacket = MAX_CTRL_PL_SIZE;
	ep->ep.desc = NULL;
	ep->stopped = 1;
	ep->dma_start = 0;
}

/*
 * Generate STALL on ep0in/out.
 *
 * Must be called with dev->lock held.
 */
static void gr_control_stall(struct gr_udc *dev)
{
	u32 epctrl;

	epctrl = gr_read32(&dev->epo[0].regs->epctrl);
	gr_write32(&dev->epo[0].regs->epctrl, epctrl | GR_EPCTRL_CS);
	epctrl = gr_read32(&dev->epi[0].regs->epctrl);
	gr_write32(&dev->epi[0].regs->epctrl, epctrl | GR_EPCTRL_CS);

	dev->ep0state = GR_EP0_STALL;
}

/*
 * Halts, halts and wedges, or clears halt for an endpoint.
 *
 * Must be called with dev->lock held.
 */
static int gr_ep_halt_wedge(struct gr_ep *ep, int halt, int wedge, int fromhost)
{
	u32 epctrl;
	int retval = 0;

	if (ep->num && !ep->ep.desc)
		return -EINVAL;

	if (ep->num && ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)
		return -EOPNOTSUPP;

	/* Never actually halt ep0, and therefore never clear halt for ep0 */
	if (!ep->num) {
		if (halt && !fromhost) {
			/* ep0 halt from gadget - generate protocol stall */
			gr_control_stall(ep->dev);
			dev_dbg(ep->dev->dev, "EP: stall ep0\n");
			return 0;
		}
		return -EINVAL;
	}

	dev_dbg(ep->dev->dev, "EP: %s halt %s\n",
		(halt ? (wedge ? "wedge" : "set") : "clear"), ep->ep.name);

	epctrl = gr_read32(&ep->regs->epctrl);
	if (halt) {
		/* Set HALT */
		gr_write32(&ep->regs->epctrl, epctrl | GR_EPCTRL_EH);
		ep->stopped = 1;
		if (wedge)
			ep->wedged = 1;
	} else {
		gr_write32(&ep->regs->epctrl, epctrl & ~GR_EPCTRL_EH);
		ep->stopped = 0;
		ep->wedged = 0;

		/* Things might have been queued up in the meantime */
		if (!ep->dma_start)
			gr_start_dma(ep);
	}

	return retval;
}

/* Must be called with dev->lock held */
static inline void gr_set_ep0state(struct gr_udc *dev, enum gr_ep0state value)
{
	if (dev->ep0state != value)
		dev_vdbg(dev->dev, "STATE: ep0state=%s\n",
			 gr_ep0state_string(value));
	dev->ep0state = value;
}

/*
 * Should only be called when endpoints cannot generate interrupts.
 *
 * Must be called with dev->lock held.
 */
static void gr_disable_interrupts_and_pullup(struct gr_udc *dev)
{
	gr_write32(&dev->regs->control, 0);
	wmb(); /* Make sure that we do not deny one of our interrupts */
	dev->irq_enabled = 0;
}

/*
 * Stop all device activity and disable data line pullup.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_stop_activity(struct gr_udc *dev)
{
	struct gr_ep *ep;

	list_for_each_entry(ep, &dev->ep_list, ep_list)
		gr_ep_nuke(ep);

	gr_disable_interrupts_and_pullup(dev);

	gr_set_ep0state(dev, GR_EP0_DISCONNECT);
	usb_gadget_set_state(&dev->gadget, USB_STATE_NOTATTACHED);
}

/* ---------------------------------------------------------------------- */
/* ep0 setup packet handling */

static void gr_ep0_testmode_complete(struct usb_ep *_ep,
				     struct usb_request *_req)
{
	struct gr_ep *ep;
	struct gr_udc *dev;
	u32 control;

	ep = container_of(_ep, struct gr_ep, ep);
	dev = ep->dev;

	spin_lock(&dev->lock);

	control = gr_read32(&dev->regs->control);
	control |= GR_CONTROL_TM | (dev->test_mode << GR_CONTROL_TS_POS);
	gr_write32(&dev->regs->control, control);

	spin_unlock(&dev->lock);
}

static void gr_ep0_dummy_complete(struct usb_ep *_ep, struct usb_request *_req)
{
	/* Nothing needs to be done here */
}

/*
 * Queue a response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static int gr_ep0_respond(struct gr_udc *dev, u8 *buf, int length,
			  void (*complete)(struct usb_ep *ep,
					   struct usb_request *req))
{
	u8 *reqbuf = dev->ep0reqi->req.buf;
	int status;
	int i;

	for (i = 0; i < length; i++)
		reqbuf[i] = buf[i];
	dev->ep0reqi->req.length = length;
	dev->ep0reqi->req.complete = complete;

	status = gr_queue_int(&dev->epi[0], dev->ep0reqi, GFP_ATOMIC);
	if (status < 0)
		dev_err(dev->dev,
			"Could not queue ep0in setup response: %d\n", status);

	return status;
}

/*
 * Queue a 2 byte response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_ep0_respond_u16(struct gr_udc *dev, u16 response)
{
	__le16 le_response = cpu_to_le16(response);

	return gr_ep0_respond(dev, (u8 *)&le_response, 2,
			      gr_ep0_dummy_complete);
}

/*
 * Queue a ZLP response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_ep0_respond_empty(struct gr_udc *dev)
{
	return gr_ep0_respond(dev, NULL, 0, gr_ep0_dummy_complete);
}

/*
 * This is run when a SET_ADDRESS request is received. First writes
 * the new address to the control register which is updated internally
 * when the next IN packet is ACKED.
 *
 * Must be called with dev->lock held.
 */
static void gr_set_address(struct gr_udc *dev, u8 address)
{
	u32 control;

	control = gr_read32(&dev->regs->control) & ~GR_CONTROL_UA_MASK;
	control |= (address << GR_CONTROL_UA_POS) & GR_CONTROL_UA_MASK;
	control |= GR_CONTROL_SU;
	gr_write32(&dev->regs->control, control);
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_device_request(struct gr_udc *dev, u8 type, u8 request,
			     u16 value, u16 index)
{
	u16 response;
	u8 test;

	switch (request) {
	case USB_REQ_SET_ADDRESS:
		dev_dbg(dev->dev, "STATUS: address %d\n", value & 0xff);
		gr_set_address(dev, value & 0xff);
		if (value)
			usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
		else
			usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
		return gr_ep0_respond_empty(dev);

	case USB_REQ_GET_STATUS:
		/* Self powered | remote wakeup */
		response = 0x0001 | (dev->remote_wakeup ? 0x0002 : 0);
		return gr_ep0_respond_u16(dev, response);

	case USB_REQ_SET_FEATURE:
		switch (value) {
		case USB_DEVICE_REMOTE_WAKEUP:
			/* Allow remote wakeup */
			dev->remote_wakeup = 1;
			return gr_ep0_respond_empty(dev);

		case USB_DEVICE_TEST_MODE:
			/* The hardware does not support TEST_FORCE_EN */
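			/* The test selector is in the high byte of wIndex */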
			test = index >> 8;
			if (test >= TEST_J && test <= TEST_PACKET) {
				dev->test_mode = test;
				return gr_ep0_respond(dev, NULL, 0,
						      gr_ep0_testmode_complete);
			}
		}
		break;

	case USB_REQ_CLEAR_FEATURE:
		switch (value) {
		case USB_DEVICE_REMOTE_WAKEUP:
			/* Disallow remote wakeup */
			dev->remote_wakeup = 0;
			return gr_ep0_respond_empty(dev);
		}
		break;
	}

	return 1; /* Delegate the rest */
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_interface_request(struct gr_udc *dev, u8 type, u8 request,
				u16 value, u16 index)
{
	if (dev->gadget.state != USB_STATE_CONFIGURED)
		return -1;

	/*
	 * Should return STALL for invalid interfaces, but the UDC driver does
	 * not know anything about that. However, many gadget drivers do not
	 * handle GET_STATUS, so we need to take care of that here.
	 */

	switch (request) {
	case USB_REQ_GET_STATUS:
		return gr_ep0_respond_u16(dev, 0x0000);

	case USB_REQ_SET_FEATURE:
	case USB_REQ_CLEAR_FEATURE:
		/*
		 * No possible valid standard requests. Still let gadget drivers
		 * have a go at it.
		 */
		break;
	}

	return 1; /* Delegate the rest */
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_endpoint_request(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index)
{
	struct gr_ep *ep;
	int status;
	int halted;
	u8 epnum = index & USB_ENDPOINT_NUMBER_MASK;
	u8 is_in = index & USB_ENDPOINT_DIR_MASK;

	if ((is_in && epnum >= dev->nepi) || (!is_in && epnum >= dev->nepo))
		return -1;

	if (dev->gadget.state != USB_STATE_CONFIGURED && epnum != 0)
		return -1;

	ep = (is_in ? &dev->epi[epnum] : &dev->epo[epnum]);

	switch (request) {
	case USB_REQ_GET_STATUS:
		halted = gr_read32(&ep->regs->epctrl) & GR_EPCTRL_EH;
		return gr_ep0_respond_u16(dev, halted ? 0x0001 : 0);

	case USB_REQ_SET_FEATURE:
		switch (value) {
		case USB_ENDPOINT_HALT:
			status = gr_ep_halt_wedge(ep, 1, 0, 1);
			if (status >= 0)
				status = gr_ep0_respond_empty(dev);
			return status;
		}
		break;

	case USB_REQ_CLEAR_FEATURE:
		switch (value) {
		case USB_ENDPOINT_HALT:
			if (ep->wedged)
				return -1;
			status = gr_ep_halt_wedge(ep, 0, 0, 1);
			if (status >= 0)
				status = gr_ep0_respond_empty(dev);
			return status;
		}
		break;
	}

	return 1; /* Delegate the rest */
}

/* Must be called with dev->lock held */
static void gr_ep0out_requeue(struct gr_udc *dev)
{
	int ret = gr_queue_int(&dev->epo[0], dev->ep0reqo, GFP_ATOMIC);

	if (ret)
		dev_err(dev->dev, "Could not queue ep0out setup request: %d\n",
			ret);
}

/*
 * The main function dealing with setup requests on ep0.
 *
 * Must be called with dev->lock held and irqs disabled
 */
static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req)
	__releases(&dev->lock)
	__acquires(&dev->lock)
{
	union {
		struct usb_ctrlrequest ctrl;
		u8 raw[8];
		u32 word[2];
	} u;
	u8 type;
	u8 request;
	u16 value;
	u16 index;
	u16 length;
	int i;
	int status;

	/* Restore from ep0 halt */
	if (dev->ep0state == GR_EP0_STALL) {
		gr_set_ep0state(dev, GR_EP0_SETUP);
		if (!req->req.actual)
			goto out;
	}

	if (dev->ep0state == GR_EP0_ISTATUS) {
		gr_set_ep0state(dev, GR_EP0_SETUP);
		if (req->req.actual > 0)
			dev_dbg(dev->dev,
				"Unexpected setup packet at state %s\n",
				gr_ep0state_string(GR_EP0_ISTATUS));
		else
			goto out; /* Got expected ZLP */
	} else if (dev->ep0state != GR_EP0_SETUP) {
		dev_info(dev->dev,
			 "Unexpected ep0out request at state %s - stalling\n",
			 gr_ep0state_string(dev->ep0state));
		gr_control_stall(dev);
		gr_set_ep0state(dev, GR_EP0_SETUP);
		goto out;
	} else if (!req->req.actual) {
		dev_dbg(dev->dev, "Unexpected ZLP at state %s\n",
			gr_ep0state_string(dev->ep0state));
		goto out;
	}

	/* Handle SETUP packet */
	for (i = 0; i < req->req.actual; i++)
		u.raw[i] = ((u8 *)req->req.buf)[i];

	type = u.ctrl.bRequestType;
	request = u.ctrl.bRequest;
	value = le16_to_cpu(u.ctrl.wValue);
	index = le16_to_cpu(u.ctrl.wIndex);
	length = le16_to_cpu(u.ctrl.wLength);

	gr_dbgprint_devreq(dev, type, request, value, index, length);

	/* Check for data stage */
	if (length) {
		if (type & USB_DIR_IN)
			gr_set_ep0state(dev, GR_EP0_IDATA);
		else
			gr_set_ep0state(dev, GR_EP0_ODATA);
	}

	status = 1; /* Positive status flags delegation */
	if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (type & USB_RECIP_MASK) {
		case USB_RECIP_DEVICE:
			status = gr_device_request(dev, type, request,
						   value, index);
			break;
		case USB_RECIP_ENDPOINT:
			status = gr_endpoint_request(dev, type, request,
						     value, index);
			break;
		case USB_RECIP_INTERFACE:
			status = gr_interface_request(dev, type, request,
						      value, index);
			break;
		}
	}

	if (status > 0) {
		spin_unlock(&dev->lock);

		dev_vdbg(dev->dev, "DELEGATE\n");
		status = dev->driver->setup(&dev->gadget, &u.ctrl);

		spin_lock(&dev->lock);
	}

	/* Generate STALL on both ep0out and ep0in if requested */
	if (unlikely(status < 0)) {
		dev_vdbg(dev->dev, "STALL\n");
		gr_control_stall(dev);
	}

	if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD &&
	    request == USB_REQ_SET_CONFIGURATION) {
		if (!value) {
			dev_dbg(dev->dev, "STATUS: deconfigured\n");
			usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
		} else if (status >= 0) {
			/* Not configured unless the gadget acks it */
			dev_dbg(dev->dev, "STATUS: configured: %d\n", value);
			usb_gadget_set_state(&dev->gadget,
					     USB_STATE_CONFIGURED);
		}
	}

	/* Get ready for next stage */
	if (dev->ep0state == GR_EP0_ODATA)
		gr_set_ep0state(dev, GR_EP0_OSTATUS);
	else if (dev->ep0state == GR_EP0_IDATA)
		gr_set_ep0state(dev, GR_EP0_ISTATUS);
	else
		gr_set_ep0state(dev, GR_EP0_SETUP);

out:
	gr_ep0out_requeue(dev);
}

/* ---------------------------------------------------------------------- */
/* VBUS and USB reset handling */

/* Must be called with dev->lock held and irqs disabled */
static void gr_vbus_connected(struct gr_udc *dev, u32 status)
{
	u32 control;

	dev->gadget.speed = GR_SPEED(status);
	usb_gadget_set_state(&dev->gadget, USB_STATE_POWERED);

	/* Turn on full interrupts and pullup */
	control = (GR_CONTROL_SI | GR_CONTROL_UI | GR_CONTROL_VI |
		   GR_CONTROL_SP | GR_CONTROL_EP);
	gr_write32(&dev->regs->control, control);
}

/* Must be called with dev->lock held */
static void gr_enable_vbus_detect(struct gr_udc *dev)
{
	u32 status;

	dev->irq_enabled = 1;
	wmb(); /* Make sure we do not ignore an interrupt */
	gr_write32(&dev->regs->control, GR_CONTROL_VI);

	/* Take care of the case we are already plugged in at this point */
	status = gr_read32(&dev->regs->status);
	if (status & GR_STATUS_VB)
		gr_vbus_connected(dev, status);
}

/* Must be called with dev->lock held and irqs disabled */
static void gr_vbus_disconnected(struct gr_udc *dev)
{
	gr_stop_activity(dev);

	/* Report disconnect */
	if (dev->driver && dev->driver->disconnect) {
		spin_unlock(&dev->lock);

		dev->driver->disconnect(&dev->gadget);

		spin_lock(&dev->lock);
	}

	gr_enable_vbus_detect(dev);
}

/* Must be called with dev->lock held and irqs disabled */
static void gr_udc_usbreset(struct gr_udc *dev, u32 status)
{
	gr_set_address(dev, 0);
	gr_set_ep0state(dev, GR_EP0_SETUP);
	usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
	dev->gadget.speed = GR_SPEED(status);

	gr_ep_nuke(&dev->epo[0]);
	gr_ep_nuke(&dev->epi[0]);
	dev->epo[0].stopped = 0;
	dev->epi[0].stopped = 0;
	gr_ep0out_requeue(dev);
}

/* ---------------------------------------------------------------------- */
/* Irq handling */

/*
 * Handles interrupts from in endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_in_ep(struct gr_ep *ep)
{
	struct gr_request *req;

	req = list_first_entry(&ep->queue, struct gr_request, queue);
	if (!req->last_desc)
		return 0;

	if (ACCESS_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
		return 0; /* Not put in hardware buffers yet */

	if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0))
		return 0; /* Not transmitted yet, still in hardware buffers */

	/* Write complete */
	gr_dma_advance(ep, 0);

	return 1;
}

/*
 * Handles interrupts from out endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_out_ep(struct gr_ep *ep)
{
	u32 ep_dmactrl;
	u32 ctrl;
	u16 len;
	struct gr_request *req;
	struct gr_udc *dev = ep->dev;

	req = list_first_entry(&ep->queue, struct gr_request, queue);
	if (!req->curr_desc)
		return 0;

	ctrl = ACCESS_ONCE(req->curr_desc->ctrl);
	if (ctrl & GR_DESC_OUT_CTRL_EN)
		return 0; /* Not received yet */

	/* Read complete */
	len = ctrl & GR_DESC_OUT_CTRL_LEN_MASK;
	req->req.actual += len;
	if (ctrl & GR_DESC_OUT_CTRL_SE)
		req->setup = 1;

	if (len < ep->ep.maxpacket || req->req.actual == req->req.length) {
		/* Short packet or the expected size - we are done */

		if ((ep == &dev->epo[0]) && (dev->ep0state == GR_EP0_OSTATUS)) {
			/*
			 * Send a status stage ZLP to ack the DATA stage in the
			 * OUT direction. This needs to be done before
			 * gr_dma_advance as that can lead to a call to
			 * ep0_setup that can change dev->ep0state.
			 */
			gr_ep0_respond_empty(dev);
			gr_set_ep0state(dev, GR_EP0_SETUP);
		}

		gr_dma_advance(ep, 0);
	} else {
		/* Not done yet. Enable the next descriptor to receive more. */
		req->curr_desc = req->curr_desc->next_desc;
		req->curr_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

		ep_dmactrl = gr_read32(&ep->regs->dmactrl);
		gr_write32(&ep->regs->dmactrl, ep_dmactrl | GR_DMACTRL_DA);
	}

	return 1;
}

/*
 * Handle state changes. Returns whether something was handled.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static int gr_handle_state_changes(struct gr_udc *dev)
{
	u32 status = gr_read32(&dev->regs->status);
	int handled = 0;
	int powstate = !(dev->gadget.state == USB_STATE_NOTATTACHED ||
			 dev->gadget.state == USB_STATE_ATTACHED);

	/* VBUS valid detected */
	if (!powstate && (status & GR_STATUS_VB)) {
		dev_dbg(dev->dev, "STATUS: vbus valid detected\n");
		gr_vbus_connected(dev, status);
		handled = 1;
	}

	/* Disconnect */
	if (powstate && !(status & GR_STATUS_VB)) {
		dev_dbg(dev->dev, "STATUS: vbus invalid detected\n");
		gr_vbus_disconnected(dev);
		handled = 1;
	}

	/* USB reset detected */
	if (status & GR_STATUS_UR) {
		dev_dbg(dev->dev, "STATUS: USB reset - speed is %s\n",
			GR_SPEED_STR(status));
		gr_write32(&dev->regs->status, GR_STATUS_UR);
		gr_udc_usbreset(dev, status);
		handled = 1;
	}

	/* Speed change */
	if (dev->gadget.speed != GR_SPEED(status)) {
		dev_dbg(dev->dev, "STATUS: USB Speed change to %s\n",
			GR_SPEED_STR(status));
		dev->gadget.speed = GR_SPEED(status);
		handled = 1;
	}

	/* Going into suspend */
	if ((dev->ep0state != GR_EP0_SUSPEND) && !(status & GR_STATUS_SU)) {
		dev_dbg(dev->dev, "STATUS: USB suspend\n");
		gr_set_ep0state(dev, GR_EP0_SUSPEND);
		dev->suspended_from = dev->gadget.state;
		usb_gadget_set_state(&dev->gadget, USB_STATE_SUSPENDED);

		if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
		    dev->driver && dev->driver->suspend) {
			spin_unlock(&dev->lock);

			dev->driver->suspend(&dev->gadget);

			spin_lock(&dev->lock);
		}
		handled = 1;
	}

	/* Coming out of suspend */
	if ((dev->ep0state == GR_EP0_SUSPEND) && (status & GR_STATUS_SU)) {
		dev_dbg(dev->dev, "STATUS: USB resume\n");
		if (dev->suspended_from == USB_STATE_POWERED)
			gr_set_ep0state(dev, GR_EP0_DISCONNECT);
		else
			gr_set_ep0state(dev, GR_EP0_SETUP);
		usb_gadget_set_state(&dev->gadget, dev->suspended_from);

		if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
		    dev->driver && dev->driver->resume) {
			spin_unlock(&dev->lock);

			dev->driver->resume(&dev->gadget);

			spin_lock(&dev->lock);
		}
		handled = 1;
	}

	return handled;
}

/* Threaded irq handler - runs in non-interrupt (process) context */
static irqreturn_t gr_irq_handler(int irq, void *_dev)
{
	struct gr_udc *dev = _dev;
	struct gr_ep *ep;
	int handled = 0;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	if (!dev->irq_enabled)
		goto out;

	/*
	 * Check IN ep interrupts. We check these before the OUT eps because
	 * some gadgets reuse the request that might already be currently
	 * outstanding and needs to be completed (mainly setup requests).
	 */
	for (i = 0; i < dev->nepi; i++) {
		ep = &dev->epi[i];
		if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
			handled = gr_handle_in_ep(ep) || handled;
	}

	/* Check OUT ep interrupts */
	for (i = 0; i < dev->nepo; i++) {
		ep = &dev->epo[i];
		if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
			handled = gr_handle_out_ep(ep) || handled;
	}

	/* Check status interrupts */
	handled = gr_handle_state_changes(dev) || handled;

	/*
	 * Check AMBA DMA errors. Only check if we didn't find anything else to
	 * handle because this shouldn't happen if we did everything right.
	 */
	if (!handled) {
		list_for_each_entry(ep, &dev->ep_list, ep_list) {
			if (gr_read32(&ep->regs->dmactrl) & GR_DMACTRL_AE) {
				dev_err(dev->dev,
					"AMBA Error occurred for %s\n",
					ep->ep.name);
				handled = 1;
			}
		}
	}

out:
	spin_unlock_irqrestore(&dev->lock, flags);

	return handled ? IRQ_HANDLED : IRQ_NONE;
}

/* Hard irq handler - just decides whether to wake the threaded handler */
static irqreturn_t gr_irq(int irq, void *_dev)
{
	struct gr_udc *dev = _dev;

	if (!dev->irq_enabled)
		return IRQ_NONE;

	return IRQ_WAKE_THREAD;
}

/* ---------------------------------------------------------------------- */
/* USB ep ops */

/* Enable endpoint. Not for ep0in and ep0out that are handled separately. */
static int gr_ep_enable(struct usb_ep *_ep,
			const struct usb_endpoint_descriptor *desc)
{
	struct gr_udc *dev;
	struct gr_ep *ep;
	u8 mode;
	u8 nt;
	u16 max;
	u16 buffer_size = 0;
	u32 epctrl;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	dev = ep->dev;

	/* 'ep0' IN and OUT are reserved */
	if (ep == &dev->epo[0] || ep == &dev->epi[0])
		return -EINVAL;

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* Make sure we are clear for enabling */
	epctrl = gr_read32(&ep->regs->epctrl);
	if (epctrl & GR_EPCTRL_EV)
		return -EBUSY;

	/* Check that directions match */
	if (!ep->is_in != !usb_endpoint_dir_in(desc))
		return -EINVAL;

	/* Check ep num */
	if ((!ep->is_in && ep->num >= dev->nepo) ||
	    (ep->is_in && ep->num >= dev->nepi))
		return -EINVAL;

	if (usb_endpoint_xfer_control(desc)) {
		mode = 0;
	} else if (usb_endpoint_xfer_isoc(desc)) {
		mode = 1;
	} else if (usb_endpoint_xfer_bulk(desc)) {
		mode = 2;
	} else if (usb_endpoint_xfer_int(desc)) {
		mode = 3;
	} else {
		dev_err(dev->dev, "Unknown transfer type for %s\n",
			ep->ep.name);
		return -EINVAL;
	}

	/*
	 * Bits 10-0 set the max payload. 12-11 set the number of
	 * additional transactions.
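	 * For example, wMaxPacketSize 0x1400 gives max = 1024 and nt = 2,
	 * i.e. three transactions per microframe.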
	 */
	max = 0x7ff & usb_endpoint_maxp(desc);
	nt = 0x3 & (usb_endpoint_maxp(desc) >> 11);
	buffer_size = GR_BUFFER_SIZE(epctrl);
	if (nt && (mode == 0 || mode == 2)) {
		dev_err(dev->dev,
			"%s mode: multiple trans./microframe not valid\n",
			(mode == 2 ? "Bulk" : "Control"));
		return -EINVAL;
	} else if (nt == 0x3) {
		/* nt is a two-bit field, so 0x3 is its only invalid value */
		dev_err(dev->dev, "Invalid value for trans./microframe\n");
		return -EINVAL;
	} else if ((nt + 1) * max > buffer_size) {
		dev_err(dev->dev, "Hw buffer size %d < max payload %d * %d\n",
			buffer_size, (nt + 1), max);
		return -EINVAL;
	} else if (max == 0) {
		dev_err(dev->dev, "Max payload cannot be set to 0\n");
		return -EINVAL;
	}

	spin_lock(&ep->dev->lock);

	if (!ep->stopped) {
		spin_unlock(&ep->dev->lock);
		return -EBUSY;
	}

	ep->stopped = 0;
	ep->wedged = 0;
	ep->ep.desc = desc;
	ep->ep.maxpacket = max;
	ep->dma_start = 0;

	if (nt) {
		/*
		 * Maximum possible size of all payloads in one microframe
		 * regardless of direction when using high-bandwidth mode.
		 */
		ep->bytes_per_buffer = (nt + 1) * max;
	} else if (ep->is_in) {
		/*
		 * The biggest multiple of maximum packet size that fits into
		 * the buffer. The hardware will split up into many packets in
		 * the IN direction.
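		 * (E.g. a 1024-byte buffer with max = 300 would give
		 * bytes_per_buffer = 900.)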
		 */
		ep->bytes_per_buffer = (buffer_size / max) * max;
	} else {
		/*
		 * Only single packets will be placed in the buffers in the
		 * OUT direction.
		 */
		ep->bytes_per_buffer = max;
	}

	epctrl = (max << GR_EPCTRL_MAXPL_POS)
		| (nt << GR_EPCTRL_NT_POS)
		| (mode << GR_EPCTRL_TT_POS)
		| GR_EPCTRL_EV;
	if (ep->is_in)
		epctrl |= GR_EPCTRL_PI;
	gr_write32(&ep->regs->epctrl, epctrl);

	gr_write32(&ep->regs->dmactrl, GR_DMACTRL_IE | GR_DMACTRL_AI);

	spin_unlock(&ep->dev->lock);

	dev_dbg(ep->dev->dev, "EP: %s enabled - %s with %d bytes/buffer\n",
		ep->ep.name, gr_modestring[mode], ep->bytes_per_buffer);
	return 0;
}

/* Disable endpoint. Not for ep0in and ep0out that are handled separately. */
static int gr_ep_disable(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	struct gr_udc *dev;
	unsigned long flags;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !ep->ep.desc)
		return -ENODEV;

	dev = ep->dev;

	/* 'ep0' IN and OUT are reserved */
	if (ep == &dev->epo[0] || ep == &dev->epi[0])
		return -EINVAL;

	if (dev->ep0state == GR_EP0_SUSPEND)
		return -EBUSY;

	dev_dbg(ep->dev->dev, "EP: disable %s\n", ep->ep.name);

	spin_lock_irqsave(&dev->lock, flags);

	gr_ep_nuke(ep);
	gr_ep_reset(ep);
	ep->ep.desc = NULL;

	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*
 * Frees a request, but not any DMA buffers associated with it
 * (gr_finish_request should already have taken care of that).
 */
static void gr_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct gr_request *req;

	if (!_ep || !_req)
		return;
	req = container_of(_req, struct gr_request, req);

	/* Leads to memory leak */
	WARN(!list_empty(&req->queue),
	     "request not dequeued properly before freeing\n");

	kfree(req);
}

/* Queue a request from the gadget */
static int gr_queue_ext(struct usb_ep *_ep, struct usb_request *_req,
			gfp_t gfp_flags)
{
	struct gr_ep *ep;
	struct gr_request *req;
	struct gr_udc *dev;
	int ret;

	if (unlikely(!_ep || !_req))
		return -EINVAL;

	ep = container_of(_ep, struct gr_ep, ep);
	req = container_of(_req, struct gr_request, req);
	dev = ep->dev;

	spin_lock(&ep->dev->lock);

	/*
	 * The ep0 pointer in the gadget struct is used both for ep0in and
	 * ep0out. In a data stage in the out direction ep0out needs to be used
	 * instead of the default ep0in. Completion functions might use
	 * driver_data, so that needs to be copied as well.
	 */
	if ((ep == &dev->epi[0]) && (dev->ep0state == GR_EP0_ODATA)) {
		ep = &dev->epo[0];
		ep->ep.driver_data = dev->epi[0].ep.driver_data;
	}

	if (ep->is_in)
		gr_dbgprint_request("EXTERN", ep, req);

	ret = gr_queue(ep, req, gfp_flags);

	spin_unlock(&ep->dev->lock);

	return ret;
}

/* Dequeue JUST ONE request */
static int gr_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct gr_request *req;
	struct gr_ep *ep;
	struct gr_udc *dev;
	int ret = 0;
	unsigned long flags;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !_req || (!ep->ep.desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver)
		return -ESHUTDOWN;

	/* We can't touch (DMA) registers when suspended */
	if (dev->ep0state == GR_EP0_SUSPEND)
		return -EBUSY;

	spin_lock_irqsave(&dev->lock, flags);

	/* Make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		ret = -EINVAL;
		goto out;
	}

	if (list_first_entry(&ep->queue, struct gr_request, queue) == req) {
		/* This request is currently being processed */
		gr_abort_dma(ep);
		if (ep->stopped)
			gr_finish_request(ep, req, -ECONNRESET);
		else
			gr_dma_advance(ep, -ECONNRESET);
	} else if (!list_empty(&req->queue)) {
		/* Not being processed - gr_finish_request dequeues it */
		gr_finish_request(ep, req, -ECONNRESET);
	} else {
		ret = -EOPNOTSUPP;
	}

out:
	spin_unlock_irqrestore(&dev->lock, flags);

	return ret;
}

/* Helper for gr_set_halt and gr_set_wedge */
static int gr_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
{
	int ret;
	struct gr_ep *ep;

	if (!_ep)
		return -ENODEV;
	ep = container_of(_ep, struct gr_ep, ep);

	spin_lock(&ep->dev->lock);

	/* Halting an IN endpoint should fail if queue is not empty */
	if (halt && ep->is_in && !list_empty(&ep->queue)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = gr_ep_halt_wedge(ep, halt, wedge, 0);

out:
	spin_unlock(&ep->dev->lock);

	return ret;
}

/* Halt endpoint */
static int gr_set_halt(struct usb_ep *_ep, int halt)
{
	return gr_set_halt_wedge(_ep, halt, 0);
}

/* Halt and wedge endpoint */
static int gr_set_wedge(struct usb_ep *_ep)
{
	return gr_set_halt_wedge(_ep, 1, 1);
}

/*
 * Return the total number of bytes currently stored in the internal buffers of
 * the endpoint.
 */
static int gr_fifo_status(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	u32 epstat;
	u32 bytes = 0;

	if (!_ep)
		return -ENODEV;
	ep = container_of(_ep, struct gr_ep, ep);

	epstat = gr_read32(&ep->regs->epstat);

	if (epstat & GR_EPSTAT_B0)
		bytes += (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS;
	if (epstat & GR_EPSTAT_B1)
		bytes += (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS;

	return bytes;
}

/* Empty data from internal buffers of an endpoint. */
static void gr_fifo_flush(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	u32 epctrl;

	if (!_ep)
		return;
	ep = container_of(_ep, struct gr_ep, ep);
	dev_vdbg(ep->dev->dev, "EP: flush fifo %s\n", ep->ep.name);

	spin_lock(&ep->dev->lock);

	epctrl = gr_read32(&ep->regs->epctrl);
	epctrl |= GR_EPCTRL_CB;
	gr_write32(&ep->regs->epctrl, epctrl);

	spin_unlock(&ep->dev->lock);
}

static const struct usb_ep_ops gr_ep_ops = {
	.enable		= gr_ep_enable,
	.disable	= gr_ep_disable,

	.alloc_request	= gr_alloc_request,
	.free_request	= gr_free_request,

	.queue		= gr_queue_ext,
	.dequeue	= gr_dequeue,

	.set_halt	= gr_set_halt,
	.set_wedge	= gr_set_wedge,
	.fifo_status	= gr_fifo_status,
	.fifo_flush	= gr_fifo_flush,
};

/* ---------------------------------------------------------------------- */
/* USB Gadget ops */

static int gr_get_frame(struct usb_gadget *_gadget)
{
	struct gr_udc *dev;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);
	return gr_read32(&dev->regs->status) & GR_STATUS_FN_MASK;
}

static int gr_wakeup(struct usb_gadget *_gadget)
{
	struct gr_udc *dev;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);

	/* Remote wakeup feature not enabled by host */
	if (!dev->remote_wakeup)
		return -EINVAL;

	spin_lock(&dev->lock);

	gr_write32(&dev->regs->control,
		   gr_read32(&dev->regs->control) | GR_CONTROL_RW);

	spin_unlock(&dev->lock);

	return 0;
}

static int gr_pullup(struct usb_gadget *_gadget, int is_on)
{
	struct gr_udc *dev;
	u32 control;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);

	spin_lock(&dev->lock);

	control = gr_read32(&dev->regs->control);
	if (is_on)
		control |= GR_CONTROL_EP;
	else
		control &= ~GR_CONTROL_EP;
	gr_write32(&dev->regs->control, control);

	spin_unlock(&dev->lock);

	return 0;
}

static int gr_udc_start(struct usb_gadget *gadget,
			struct usb_gadget_driver *driver)
{
	struct gr_udc *dev = to_gr_udc(gadget);

	spin_lock(&dev->lock);

	/* Hook up the driver */
	driver->driver.bus = NULL;
	dev->driver = driver;

	/* Get ready for host detection */
	gr_enable_vbus_detect(dev);

	spin_unlock(&dev->lock);

	dev_info(dev->dev, "Started with gadget driver '%s'\n",
		 driver->driver.name);

	return 0;
}

static int gr_udc_stop(struct usb_gadget *gadget,
		       struct usb_gadget_driver *driver)
{
	struct gr_udc *dev = to_gr_udc(gadget);
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	dev->driver = NULL;
	gr_stop_activity(dev);

	spin_unlock_irqrestore(&dev->lock, flags);

	dev_info(dev->dev, "Stopped\n");

	return 0;
}

static const struct usb_gadget_ops gr_ops = {
	.get_frame	= gr_get_frame,
	.wakeup		= gr_wakeup,
	.pullup		= gr_pullup,
	.udc_start	= gr_udc_start,
	.udc_stop	= gr_udc_stop,
	/* Other operations not supported */
};

/* ---------------------------------------------------------------------- */
/* Module probe, removal and of-matching */

static const char * const onames[] = {
	"ep0out", "ep1out", "ep2out", "ep3out", "ep4out", "ep5out",
	"ep6out", "ep7out", "ep8out", "ep9out", "ep10out", "ep11out",
	"ep12out", "ep13out", "ep14out", "ep15out"
};

static const char * const inames[] = {
	"ep0in", "ep1in", "ep2in", "ep3in", "ep4in", "ep5in",
	"ep6in", "ep7in", "ep8in", "ep9in", "ep10in", "ep11in",
	"ep12in", "ep13in", "ep14in", "ep15in"
};

/* Must be called with dev->lock held */
static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit)
{
	struct gr_ep *ep;
	struct gr_request *req;
	struct usb_request *_req;
	void *buf;

	if (is_in) {
		ep = &dev->epi[num];
		ep->ep.name = inames[num];
		ep->regs = &dev->regs->epi[num];
	} else {
		ep = &dev->epo[num];
		ep->ep.name = onames[num];
		ep->regs = &dev->regs->epo[num];
	}

	gr_ep_reset(ep);
	ep->num = num;
	ep->is_in = is_in;
	ep->dev = dev;
	ep->ep.ops = &gr_ep_ops;
	INIT_LIST_HEAD(&ep->queue);

	if (num == 0) {
		_req = gr_alloc_request(&ep->ep, GFP_KERNEL);
		buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_KERNEL);
		if (!_req || !buf) {
			/* possible _req freed by gr_probe via gr_remove */
			return -ENOMEM;
		}

		req = container_of(_req, struct gr_request, req);
		req->req.buf = buf;
		req->req.length = MAX_CTRL_PL_SIZE;

		if (is_in)
			dev->ep0reqi = req; /* Complete gets set as used */
		else
			dev->ep0reqo = req; /* Completion treated separately */

		usb_ep_set_maxpacket_limit(&ep->ep, MAX_CTRL_PL_SIZE);
		ep->bytes_per_buffer = MAX_CTRL_PL_SIZE;
	} else {
		usb_ep_set_maxpacket_limit(&ep->ep, (u16)maxplimit);
		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
	}
	list_add_tail(&ep->ep_list, &dev->ep_list);

	return 0;
}

/* Must be called with dev->lock held */
static int gr_udc_init(struct gr_udc *dev)
{
	struct device_node *np = dev->dev->of_node;
	u32 epctrl_val;
	u32 dmactrl_val;
	int i;
	int ret = 0;
	u32 *bufsizes;
	u32 bufsize;
	int len;

	gr_set_address(dev, 0);

	INIT_LIST_HEAD(&dev->gadget.ep_list);
	dev->gadget.speed = USB_SPEED_UNKNOWN;
	dev->gadget.ep0 = &dev->epi[0].ep;

	INIT_LIST_HEAD(&dev->ep_list);
	gr_set_ep0state(dev, GR_EP0_DISCONNECT);

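	/*
	 * Endpoint buffer sizes default to 1024 bytes but can be overridden
	 * per endpoint with the optional epobufsizes/epibufsizes device tree
	 * properties.
	 */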
	bufsizes = (u32 *)of_get_property(np, "epobufsizes", &len);
	len /= sizeof(u32);
	for (i = 0; i < dev->nepo; i++) {
		bufsize = (bufsizes && i < len) ? bufsizes[i] : 1024;
		ret = gr_ep_init(dev, i, 0, bufsize);
		if (ret)
			return ret;
	}

	bufsizes = (u32 *)of_get_property(np, "epibufsizes", &len);
	len /= sizeof(u32);
	for (i = 0; i < dev->nepi; i++) {
		bufsize = (bufsizes && i < len) ? bufsizes[i] : 1024;
		ret = gr_ep_init(dev, i, 1, bufsize);
		if (ret)
			return ret;
	}

	/* Must be disabled by default */
	dev->remote_wakeup = 0;

	/* Enable ep0out and ep0in */
	epctrl_val = (MAX_CTRL_PL_SIZE << GR_EPCTRL_MAXPL_POS) | GR_EPCTRL_EV;
	dmactrl_val = GR_DMACTRL_IE | GR_DMACTRL_AI;
	gr_write32(&dev->epo[0].regs->epctrl, epctrl_val);
	gr_write32(&dev->epi[0].regs->epctrl, epctrl_val | GR_EPCTRL_PI);
	gr_write32(&dev->epo[0].regs->dmactrl, dmactrl_val);
	gr_write32(&dev->epi[0].regs->dmactrl, dmactrl_val);

	return 0;
}

static int gr_remove(struct platform_device *ofdev)
{
	struct gr_udc *dev = dev_get_drvdata(&ofdev->dev);

	if (dev->added)
		usb_del_gadget_udc(&dev->gadget); /* Shuts everything down */
	if (dev->driver)
		return -EBUSY;

	gr_dfs_delete(dev);
	if (dev->desc_pool)
		dma_pool_destroy(dev->desc_pool);
	dev_set_drvdata(&ofdev->dev, NULL);

	gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req);
	gr_free_request(&dev->epo[0].ep, &dev->ep0reqo->req);

	return 0;
}

static int gr_request_irq(struct gr_udc *dev, int irq)
{
	return devm_request_threaded_irq(dev->dev, irq, gr_irq, gr_irq_handler,
					 IRQF_SHARED, driver_name, dev);
}

static int gr_probe(struct platform_device *ofdev)
{
	struct gr_udc *dev;
	struct resource *res;
	struct gr_regs __iomem *regs;
	int retval;
	u32 status;

	dev = devm_kzalloc(&ofdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->dev = &ofdev->dev;

	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(dev->dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	dev->irq = irq_of_parse_and_map(dev->dev->of_node, 0);
	if (!dev->irq) {
		dev_err(dev->dev, "No irq found\n");
		return -ENODEV;
	}

	/* Some core configurations have separate irqs for IN and OUT events */
	dev->irqi = irq_of_parse_and_map(dev->dev->of_node, 1);
	if (dev->irqi) {
		dev->irqo = irq_of_parse_and_map(dev->dev->of_node, 2);
		if (!dev->irqo) {
			dev_err(dev->dev, "Found irqi but not irqo\n");
			return -ENODEV;
		}
	}

	dev->gadget.name = driver_name;
	dev->gadget.max_speed = USB_SPEED_HIGH;
	dev->gadget.ops = &gr_ops;
	dev->gadget.quirk_ep_out_aligned_size = true;
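	/*
	 * The quirk tells gadget drivers to size OUT buffers as multiples of
	 * maxpacket; gr_queue rejects OUT requests that are not.
	 */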

	spin_lock_init(&dev->lock);
	dev->regs = regs;

	dev_set_drvdata(&ofdev->dev, dev);

	/* Determine number of endpoints and data interface mode */
	status = gr_read32(&dev->regs->status);
	dev->nepi = ((status & GR_STATUS_NEPI_MASK) >> GR_STATUS_NEPI_POS) + 1;
	dev->nepo = ((status & GR_STATUS_NEPO_MASK) >> GR_STATUS_NEPO_POS) + 1;

	if (!(status & GR_STATUS_DM)) {
		dev_err(dev->dev, "Slave mode cores are not supported\n");
		return -ENODEV;
	}

	/* --- Effects of the following calls might need explicit cleanup --- */

	/* Create DMA pool for descriptors */
	dev->desc_pool = dma_pool_create("desc_pool", dev->dev,
					 sizeof(struct gr_dma_desc), 4, 0);
	if (!dev->desc_pool) {
		dev_err(dev->dev, "Could not allocate DMA pool\n");
		return -ENOMEM;
	}

	spin_lock(&dev->lock);

	/* Inside lock so that no gadget can use this udc until probe is done */
	retval = usb_add_gadget_udc(dev->dev, &dev->gadget);
	if (retval) {
		dev_err(dev->dev, "Could not add gadget udc\n");
		goto out;
	}
	dev->added = 1;

	retval = gr_udc_init(dev);
	if (retval)
		goto out;

	gr_dfs_create(dev);

	/* Clear all interrupt enables that might be left on since last boot */
	gr_disable_interrupts_and_pullup(dev);

	retval = gr_request_irq(dev, dev->irq);
	if (retval) {
		dev_err(dev->dev, "Failed to request irq %d\n", dev->irq);
		goto out;
	}

	if (dev->irqi) {
		retval = gr_request_irq(dev, dev->irqi);
		if (retval) {
			dev_err(dev->dev, "Failed to request irqi %d\n",
				dev->irqi);
			goto out;
		}
		retval = gr_request_irq(dev, dev->irqo);
		if (retval) {
			dev_err(dev->dev, "Failed to request irqo %d\n",
				dev->irqo);
			goto out;
		}
	}

	if (dev->irqi)
		dev_info(dev->dev, "regs: %p, irqs %d, %d, %d\n", dev->regs,
			 dev->irq, dev->irqi, dev->irqo);
	else
		dev_info(dev->dev, "regs: %p, irq %d\n", dev->regs, dev->irq);

out:
	spin_unlock(&dev->lock);

	if (retval)
		gr_remove(ofdev);

	return retval;
}

static const struct of_device_id gr_match[] = {
	{.name = "GAISLER_USBDC"},
	{.name = "01_021"},
	{},
};
MODULE_DEVICE_TABLE(of, gr_match);

static struct platform_driver gr_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
		.of_match_table = gr_match,
	},
	.probe = gr_probe,
	.remove = gr_remove,
};
module_platform_driver(gr_driver);

MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");