1/*
2 * linux/drivers/usb/gadget/pxa2xx_udc.c
3 * Intel PXA25x and IXP4xx on-chip full speed USB device controllers
4 *
5 * Copyright (C) 2002 Intrinsyc, Inc. (Frank Becker)
6 * Copyright (C) 2003 Robert Schwebel, Pengutronix
7 * Copyright (C) 2003 Benedikt Spranger, Pengutronix
8 * Copyright (C) 2003 David Brownell
9 * Copyright (C) 2003 Joshua Wise
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 *
25 */
26
27#undef DEBUG
28// #define VERBOSE DBG_VERBOSE
29
30#include <linux/config.h>
31#include <linux/module.h>
32#include <linux/kernel.h>
33#include <linux/ioport.h>
34#include <linux/types.h>
35#include <linux/version.h>
36#include <linux/errno.h>
37#include <linux/delay.h>
38#include <linux/sched.h>
39#include <linux/slab.h>
40#include <linux/init.h>
41#include <linux/timer.h>
42#include <linux/list.h>
43#include <linux/interrupt.h>
44#include <linux/proc_fs.h>
45#include <linux/mm.h>
46#include <linux/device.h>
47#include <linux/dma-mapping.h>
48
49#include <asm/byteorder.h>
50#include <asm/dma.h>
51#include <asm/io.h>
52#include <asm/irq.h>
53#include <asm/system.h>
54#include <asm/mach-types.h>
55#include <asm/unaligned.h>
56#include <asm/hardware.h>
57#include <asm/arch/pxa-regs.h>
58
59#include <linux/usb_ch9.h>
60#include <linux/usb_gadget.h>
61
62#include <asm/arch/udc.h>
63
64
65/*
66 * This driver handles the USB Device Controller (UDC) in Intel's PXA 25x
67 * series processors. The UDC for the IXP 4xx series is very similar.
68 * There are fifteen endpoints, in addition to ep0.
69 *
70 * Such controller drivers work with a gadget driver. The gadget driver
71 * returns descriptors, implements configuration and data protocols used
72 * by the host to interact with this device, and allocates endpoints to
73 * the different protocol interfaces. The controller driver virtualizes
74 * usb hardware so that the gadget drivers will be more portable.
75 *
76 * This UDC hardware wants to implement a bit too much USB protocol, so
77 * it constrains the sorts of USB configuration change events that work.
78 * The errata for these chips are misleading; some "fixed" bugs from
79 * pxa250 a0/a1 b0/b1/b2 sure act like they're still there.
80 */
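/* For example, a gadget driver such as the Ethernet or serial gadget
 * binds to this controller driver through usb_gadget_register_driver()
 * below; it never touches UDC registers itself, it only queues
 * usb_request buffers on the endpoints this file exposes.
 */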
81
82#define DRIVER_VERSION "4-May-2005"
83#define DRIVER_DESC "PXA 25x USB Device Controller driver"
84
85
86static const char driver_name [] = "pxa2xx_udc";
87
88static const char ep0name [] = "ep0";
89
90
91// #define USE_DMA
92// #define USE_OUT_DMA
93// #define DISABLE_TEST_MODE
94
95#ifdef CONFIG_ARCH_IXP4XX
96#undef USE_DMA
97
98/* cpu-specific register addresses are compiled in to this code */
99#ifdef CONFIG_ARCH_PXA
100#error "Can't configure both IXP and PXA"
101#endif
102
103#endif
104
105#include "pxa2xx_udc.h"
106
107
108#ifdef USE_DMA
109static int use_dma = 1;
110module_param(use_dma, bool, 0);
111MODULE_PARM_DESC (use_dma, "true to use dma");
112
113static void dma_nodesc_handler (int dmach, void *_ep, struct pt_regs *r);
114static void kick_dma(struct pxa2xx_ep *ep, struct pxa2xx_request *req);
115
116#ifdef USE_OUT_DMA
117#define DMASTR " (dma support)"
118#else
119#define DMASTR " (dma in)"
120#endif
121
122#else /* !USE_DMA */
123#define DMASTR " (pio only)"
124#undef USE_OUT_DMA
125#endif
126
127#ifdef CONFIG_USB_PXA2XX_SMALL
128#define SIZE_STR " (small)"
129#else
130#define SIZE_STR ""
131#endif
132
133#ifdef DISABLE_TEST_MODE
134/* (mode == 0) == no undocumented chip tweaks
135 * (mode & 1) == double buffer bulk IN
136 * (mode & 2) == double buffer bulk OUT
137 * ... so mode = 3 (or 7, 15, etc) does it for both
138 */
139static ushort fifo_mode = 0;
140module_param(fifo_mode, ushort, 0);
141MODULE_PARM_DESC (fifo_mode, "pxa2xx udc fifo mode");
142#endif
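/* e.g. loading with the module parameter fifo_mode=3 (assuming the
 * usual "pxa2xx_udc" module name) would request double buffering for
 * both bulk IN and bulk OUT; see the experiment in udc_enable().
 */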
143
144/* ---------------------------------------------------------------------------
145 * endpoint related parts of the api to the usb controller hardware,
146 * used by gadget driver; and the inner talker-to-hardware core.
147 * ---------------------------------------------------------------------------
148 */
149
150static void pxa2xx_ep_fifo_flush (struct usb_ep *ep);
151static void nuke (struct pxa2xx_ep *, int status);
152
153static void pio_irq_enable(int bEndpointAddress)
154{
155 bEndpointAddress &= 0xf;
156 if (bEndpointAddress < 8)
157 UICR0 &= ~(1 << bEndpointAddress);
158 else {
159 bEndpointAddress -= 8;
160 UICR1 &= ~(1 << bEndpointAddress);
161 }
162}
163
164static void pio_irq_disable(int bEndpointAddress)
165{
166 bEndpointAddress &= 0xf;
167 if (bEndpointAddress < 8)
168 UICR0 |= 1 << bEndpointAddress;
169 else {
170 bEndpointAddress -= 8;
171 UICR1 |= 1 << bEndpointAddress;
172 }
173}
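/* UICR0 masks endpoints 0..7 and UICR1 masks endpoints 8..15: a set
 * bit blocks that endpoint's packet interrupt, a cleared bit allows it,
 * which is why enable clears bits and disable sets them.
 */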
174
175/* The UDCCR reg contains mask and interrupt status bits,
176 * so using '|=' isn't safe as it may ack an interrupt.
177 */
178#define UDCCR_MASK_BITS (UDCCR_REM | UDCCR_SRM | UDCCR_UDE)
179
180static inline void udc_set_mask_UDCCR(int mask)
181{
182 UDCCR = (UDCCR & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS);
183}
184
185static inline void udc_clear_mask_UDCCR(int mask)
186{
187 UDCCR = (UDCCR & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS);
188}
189
190static inline void udc_ack_int_UDCCR(int mask)
191{
192 /* udccr contains the bits we don't want to change */
193 __u32 udccr = UDCCR & UDCCR_MASK_BITS;
194
195 UDCCR = udccr | (mask & ~UDCCR_MASK_BITS);
196}
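/* Example of the hazard these helpers avoid: a plain "UDCCR |= UDCCR_SRM"
 * would read the status bits (RSTIR/SUSIR/RESIR) and write them back as
 * ones, acknowledging pending interrupts by accident. Masking with
 * UDCCR_MASK_BITS first touches only the mask/enable bits.
 */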
197
198/*
199 * endpoint enable/disable
200 *
201 * we need to verify the descriptors used to enable endpoints. since pxa2xx
202 * endpoint configurations are fixed, and are pretty much always enabled,
203 * there's not a lot to manage here.
204 *
205 * because pxa2xx can't selectively initialize bulk (or interrupt) endpoints,
206 * (resetting endpoint halt and toggle), SET_INTERFACE is unusable except
207 * for a single interface (with only the default altsetting) and for gadget
208 * drivers that don't halt endpoints (not reset by set_interface). that also
209 * means that if you use ISO, you must violate the USB spec rule that all
210 * iso endpoints must be in non-default altsettings.
211 */
212static int pxa2xx_ep_enable (struct usb_ep *_ep,
213 const struct usb_endpoint_descriptor *desc)
214{
215 struct pxa2xx_ep *ep;
216 struct pxa2xx_udc *dev;
217
218 ep = container_of (_ep, struct pxa2xx_ep, ep);
219 if (!_ep || !desc || ep->desc || _ep->name == ep0name
220 || desc->bDescriptorType != USB_DT_ENDPOINT
221 || ep->bEndpointAddress != desc->bEndpointAddress
222 || ep->fifo_size < le16_to_cpu
223 (desc->wMaxPacketSize)) {
224 DMSG("%s, bad ep or descriptor\n", __FUNCTION__);
225 return -EINVAL;
226 }
227
228 /* xfer types must match, except that interrupt ~= bulk */
229 if (ep->bmAttributes != desc->bmAttributes
230 && ep->bmAttributes != USB_ENDPOINT_XFER_BULK
231 && desc->bmAttributes != USB_ENDPOINT_XFER_INT) {
232 DMSG("%s, %s type mismatch\n", __FUNCTION__, _ep->name);
233 return -EINVAL;
234 }
235
236 /* hardware _could_ do smaller, but driver doesn't */
237 if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
238 && le16_to_cpu (desc->wMaxPacketSize)
239 != BULK_FIFO_SIZE)
240 || !desc->wMaxPacketSize) {
241 DMSG("%s, bad %s maxpacket\n", __FUNCTION__, _ep->name);
242 return -ERANGE;
243 }
244
245 dev = ep->dev;
246 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
247 DMSG("%s, bogus device state\n", __FUNCTION__);
248 return -ESHUTDOWN;
249 }
250
251 ep->desc = desc;
252 ep->dma = -1;
253 ep->stopped = 0;
254 ep->pio_irqs = ep->dma_irqs = 0;
255 ep->ep.maxpacket = le16_to_cpu (desc->wMaxPacketSize);
256
257 /* flush fifo (mostly for OUT buffers) */
258 pxa2xx_ep_fifo_flush (_ep);
259
260 /* ... reset halt state too, if we could ... */
261
262#ifdef USE_DMA
263 /* for (some) bulk and ISO endpoints, try to get a DMA channel and
264 * bind it to the endpoint. otherwise use PIO.
265 */
266 switch (ep->bmAttributes) {
267 case USB_ENDPOINT_XFER_ISOC:
268 if (le16_to_cpu(desc->wMaxPacketSize) % 32)
269 break;
270 // fall through
271 case USB_ENDPOINT_XFER_BULK:
272 if (!use_dma || !ep->reg_drcmr)
273 break;
274 ep->dma = pxa_request_dma ((char *)_ep->name,
275 (le16_to_cpu (desc->wMaxPacketSize) > 64)
276 ? DMA_PRIO_MEDIUM /* some iso */
277 : DMA_PRIO_LOW,
278 dma_nodesc_handler, ep);
279 if (ep->dma >= 0) {
280 *ep->reg_drcmr = DRCMR_MAPVLD | ep->dma;
281 DMSG("%s using dma%d\n", _ep->name, ep->dma);
282 }
283 }
284#endif
285
286 DBG(DBG_VERBOSE, "enabled %s\n", _ep->name);
287 return 0;
288}
289
290static int pxa2xx_ep_disable (struct usb_ep *_ep)
291{
292 struct pxa2xx_ep *ep;
293 unsigned long flags;
294
295 ep = container_of (_ep, struct pxa2xx_ep, ep);
296 if (!_ep || !ep->desc) {
297 DMSG("%s, %s not enabled\n", __FUNCTION__,
298 _ep ? ep->ep.name : NULL);
299 return -EINVAL;
300 }
301 local_irq_save(flags);
302
303 nuke (ep, -ESHUTDOWN);
304
305#ifdef USE_DMA
306 if (ep->dma >= 0) {
307 *ep->reg_drcmr = 0;
308 pxa_free_dma (ep->dma);
309 ep->dma = -1;
310 }
311#endif
312
313 /* flush fifo (mostly for IN buffers) */
314 pxa2xx_ep_fifo_flush (_ep);
315
316 ep->desc = NULL;
317 ep->stopped = 1;
318
319 local_irq_restore(flags);
320 DBG(DBG_VERBOSE, "%s disabled\n", _ep->name);
321 return 0;
322}
323
324/*-------------------------------------------------------------------------*/
325
326/* for the pxa2xx, these can just wrap kmalloc/kfree. gadget drivers
327 * must still pass correctly initialized endpoints, since other controller
328 * drivers may care about how it's currently set up (dma issues etc).
329 */
330
331/*
332 * pxa2xx_ep_alloc_request - allocate a request data structure
333 */
334static struct usb_request *
335pxa2xx_ep_alloc_request (struct usb_ep *_ep, int gfp_flags)
336{
337 struct pxa2xx_request *req;
338
339 req = kmalloc (sizeof *req, gfp_flags);
340 if (!req)
341 return NULL;
342
343 memset (req, 0, sizeof *req);
344 INIT_LIST_HEAD (&req->queue);
345 return &req->req;
346}
347
348
349/*
350 * pxa2xx_ep_free_request - deallocate a request data structure
351 */
352static void
353pxa2xx_ep_free_request (struct usb_ep *_ep, struct usb_request *_req)
354{
355 struct pxa2xx_request *req;
356
357 req = container_of (_req, struct pxa2xx_request, req);
358 WARN_ON (!list_empty (&req->queue));
359 kfree(req);
360}
361
362
363/* PXA cache needs flushing with DMA I/O (it's dma-incoherent), but there's
364 * no device-affinity and the heap works perfectly well for i/o buffers.
365 * It wastes much less memory than dma_alloc_coherent() would, and even
366 * prevents cacheline (32 bytes wide) sharing problems.
367 */
368static void *
369pxa2xx_ep_alloc_buffer(struct usb_ep *_ep, unsigned bytes,
370 dma_addr_t *dma, int gfp_flags)
371{
372 char *retval;
373
374 retval = kmalloc (bytes, gfp_flags & ~(__GFP_DMA|__GFP_HIGHMEM));
375 if (retval)
376#ifdef USE_DMA
377 *dma = virt_to_bus (retval);
378#else
379 *dma = (dma_addr_t)~0;
380#endif
381 return retval;
382}
383
384static void
385pxa2xx_ep_free_buffer(struct usb_ep *_ep, void *buf, dma_addr_t dma,
386 unsigned bytes)
387{
388 kfree (buf);
389}
390
391/*-------------------------------------------------------------------------*/
392
393/*
394 * done - retire a request; caller blocked irqs
395 */
396static void done(struct pxa2xx_ep *ep, struct pxa2xx_request *req, int status)
397{
398 unsigned stopped = ep->stopped;
399
400 list_del_init(&req->queue);
401
402 if (likely (req->req.status == -EINPROGRESS))
403 req->req.status = status;
404 else
405 status = req->req.status;
406
407 if (status && status != -ESHUTDOWN)
408 DBG(DBG_VERBOSE, "complete %s req %p stat %d len %u/%u\n",
409 ep->ep.name, &req->req, status,
410 req->req.actual, req->req.length);
411
412 /* don't modify queue heads during completion callback */
413 ep->stopped = 1;
414 req->req.complete(&ep->ep, &req->req);
415 ep->stopped = stopped;
416}
417
418
419static inline void ep0_idle (struct pxa2xx_udc *dev)
420{
421 dev->ep0state = EP0_IDLE;
422}
423
424static int
425write_packet(volatile u32 *uddr, struct pxa2xx_request *req, unsigned max)
426{
427 u8 *buf;
428 unsigned length, count;
429
430 buf = req->req.buf + req->req.actual;
431 prefetch(buf);
432
433 /* how big will this packet be? */
434 length = min(req->req.length - req->req.actual, max);
435 req->req.actual += length;
436
437 count = length;
438 while (likely(count--))
439 *uddr = *buf++;
440
441 return length;
442}
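/* Worked example: for a 64-byte bulk FIFO and a 100-byte request, the
 * first call copies 64 bytes (a full packet) and the second copies the
 * remaining 36, which write_fifo() then treats as the short last packet.
 */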
443
444/*
445 * write to an IN endpoint fifo, as many packets as possible.
446 * irqs will use this to write the rest later.
447 * caller guarantees at least one packet buffer is ready (or a zlp).
448 */
449static int
450write_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
451{
452 unsigned max;
453
454 max = le16_to_cpu(ep->desc->wMaxPacketSize);
455 do {
456 unsigned count;
457 int is_last, is_short;
458
459 count = write_packet(ep->reg_uddr, req, max);
460
461 /* last packet is usually short (or a zlp) */
462 if (unlikely (count != max))
463 is_last = is_short = 1;
464 else {
465 if (likely(req->req.length != req->req.actual)
466 || req->req.zero)
467 is_last = 0;
468 else
469 is_last = 1;
470 /* interrupt/iso maxpacket may not fill the fifo */
471 is_short = unlikely (max < ep->fifo_size);
472 }
473
474 DBG(DBG_VERY_NOISY, "wrote %s %d bytes%s%s %d left %p\n",
475 ep->ep.name, count,
476 is_last ? "/L" : "", is_short ? "/S" : "",
477 req->req.length - req->req.actual, req);
478
479 /* let loose that packet. maybe try writing another one,
480 * double buffering might work. TSP, TPC, and TFS
481 * bit values are the same for all normal IN endpoints.
482 */
483 *ep->reg_udccs = UDCCS_BI_TPC;
484 if (is_short)
485 *ep->reg_udccs = UDCCS_BI_TSP;
486
487 /* requests complete when all IN data is in the FIFO */
488 if (is_last) {
489 done (ep, req, 0);
490 if (list_empty(&ep->queue) || unlikely(ep->dma >= 0)) {
491 pio_irq_disable (ep->bEndpointAddress);
492#ifdef USE_DMA
493 /* unaligned data and zlps couldn't use dma */
494 if (unlikely(!list_empty(&ep->queue))) {
495 req = list_entry(ep->queue.next,
496 struct pxa2xx_request, queue);
497 kick_dma(ep,req);
498 return 0;
499 }
500#endif
501 }
502 return 1;
503 }
504
505 // TODO experiment: how robust can fifo mode tweaking be?
506 // double buffering is off in the default fifo mode, which
507 // prevents TFS from being set here.
508
509 } while (*ep->reg_udccs & UDCCS_BI_TFS);
510 return 0;
511}
512
513/* caller asserts req->pending (ep0 irq status nyet cleared); starts
514 * ep0 data stage. these chips want very simple state transitions.
515 */
516static inline
517void ep0start(struct pxa2xx_udc *dev, u32 flags, const char *tag)
518{
519 UDCCS0 = flags|UDCCS0_SA|UDCCS0_OPR;
520 USIR0 = USIR0_IR0;
521 dev->req_pending = 0;
522 DBG(DBG_VERY_NOISY, "%s %s, %02x/%02x\n",
523 __FUNCTION__, tag, UDCCS0, flags);
524}
525
526static int
527write_ep0_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
528{
529 unsigned count;
530 int is_short;
531
532 count = write_packet(&UDDR0, req, EP0_FIFO_SIZE);
533 ep->dev->stats.write.bytes += count;
534
535 /* last packet "must be" short (or a zlp) */
536 is_short = (count != EP0_FIFO_SIZE);
537
538 DBG(DBG_VERY_NOISY, "ep0in %d bytes %d left %p\n", count,
539 req->req.length - req->req.actual, req);
540
541 if (unlikely (is_short)) {
542 if (ep->dev->req_pending)
543 ep0start(ep->dev, UDCCS0_IPR, "short IN");
544 else
545 UDCCS0 = UDCCS0_IPR;
546
547 count = req->req.length;
548 done (ep, req, 0);
549 ep0_idle(ep->dev);
550#if 1
551 /* This seems to get rid of lost status irqs in some cases:
552 * host responds quickly, or next request involves config
553 * change automagic, or should have been hidden, or ...
554 *
555 * FIXME get rid of all udelays possible...
556 */
557 if (count >= EP0_FIFO_SIZE) {
558 count = 100;
559 do {
560 if ((UDCCS0 & UDCCS0_OPR) != 0) {
561 /* clear OPR, generate ack */
562 UDCCS0 = UDCCS0_OPR;
563 break;
564 }
565 count--;
566 udelay(1);
567 } while (count);
568 }
569#endif
570 } else if (ep->dev->req_pending)
571 ep0start(ep->dev, 0, "IN");
572 return is_short;
573}
574
575
576/*
577 * read_fifo - unload packet(s) from the fifo we use for usb OUT
578 * transfers and put them into the request. caller should have made
579 * sure there's at least one packet ready.
580 *
581 * returns true if the request completed because of short packet or the
582 * request buffer having filled (and maybe overran till end-of-packet).
583 */
584static int
585read_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
586{
587 for (;;) {
588 u32 udccs;
589 u8 *buf;
590 unsigned bufferspace, count, is_short;
591
592 /* make sure there's a packet in the FIFO.
593 * UDCCS_{BO,IO}_RPC are all the same bit value.
594 * UDCCS_{BO,IO}_RNE are all the same bit value.
595 */
596 udccs = *ep->reg_udccs;
597 if (unlikely ((udccs & UDCCS_BO_RPC) == 0))
598 break;
599 buf = req->req.buf + req->req.actual;
600 prefetchw(buf);
601 bufferspace = req->req.length - req->req.actual;
602
603 /* read all bytes from this packet */
604 if (likely (udccs & UDCCS_BO_RNE)) {
605 count = 1 + (0x0ff & *ep->reg_ubcr);
606 req->req.actual += min (count, bufferspace);
607 } else /* zlp */
608 count = 0;
609 is_short = (count < ep->ep.maxpacket);
610 DBG(DBG_VERY_NOISY, "read %s %02x, %d bytes%s req %p %d/%d\n",
611 ep->ep.name, udccs, count,
612 is_short ? "/S" : "",
613 req, req->req.actual, req->req.length);
614 while (likely (count-- != 0)) {
615 u8 byte = (u8) *ep->reg_uddr;
616
617 if (unlikely (bufferspace == 0)) {
618 /* this happens when the driver's buffer
619 * is smaller than what the host sent.
620 * discard the extra data.
621 */
622 if (req->req.status != -EOVERFLOW)
623 DMSG("%s overflow %d\n",
624 ep->ep.name, count);
625 req->req.status = -EOVERFLOW;
626 } else {
627 *buf++ = byte;
628 bufferspace--;
629 }
630 }
631 *ep->reg_udccs = UDCCS_BO_RPC;
632 /* RPC/RSP/RNE could now reflect the other packet buffer */
633
634 /* iso is one request per packet */
635 if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
636 if (udccs & UDCCS_IO_ROF)
637 req->req.status = -EHOSTUNREACH;
638 /* more like "is_done" */
639 is_short = 1;
640 }
641
642 /* completion */
643 if (is_short || req->req.actual == req->req.length) {
644 done (ep, req, 0);
645 if (list_empty(&ep->queue))
646 pio_irq_disable (ep->bEndpointAddress);
647 return 1;
648 }
649
650 /* finished that packet. the next one may be waiting... */
651 }
652 return 0;
653}
654
655/*
656 * special ep0 version of the above. no UBCR0 or double buffering; status
657 * handshaking is magic. most device protocols don't need control-OUT.
658 * CDC vendor commands (and RNDIS), mass storage CB/CBI, and some other
659 * protocols do use them.
660 */
661static int
662read_ep0_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
663{
664 u8 *buf, byte;
665 unsigned bufferspace;
666
667 buf = req->req.buf + req->req.actual;
668 bufferspace = req->req.length - req->req.actual;
669
670 while (UDCCS0 & UDCCS0_RNE) {
671 byte = (u8) UDDR0;
672
673 if (unlikely (bufferspace == 0)) {
674 /* this happens when the driver's buffer
675 * is smaller than what the host sent.
676 * discard the extra data.
677 */
678 if (req->req.status != -EOVERFLOW)
679 DMSG("%s overflow\n", ep->ep.name);
680 req->req.status = -EOVERFLOW;
681 } else {
682 *buf++ = byte;
683 req->req.actual++;
684 bufferspace--;
685 }
686 }
687
688 UDCCS0 = UDCCS0_OPR | UDCCS0_IPR;
689
690 /* completion */
691 if (req->req.actual >= req->req.length)
692 return 1;
693
694 /* finished that packet. the next one may be waiting... */
695 return 0;
696}
697
698#ifdef USE_DMA
699
700#define MAX_IN_DMA ((DCMD_LENGTH + 1) - BULK_FIFO_SIZE)
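/* DCMD_LENGTH is the 13-bit DMA length field, so one no-descriptor
 * transfer moves at most 8192 bytes; backing off by BULK_FIFO_SIZE
 * keeps the cap at 8128, a multiple of the 64-byte maxpacket,
 * presumably so a capped transfer never ends in an unintended short
 * packet mid-request.
 */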
701
702static void
703start_dma_nodesc(struct pxa2xx_ep *ep, struct pxa2xx_request *req, int is_in)
704{
705 u32 dcmd = req->req.length;
706 u32 buf = req->req.dma;
707 u32 fifo = io_v2p ((u32)ep->reg_uddr);
708
709 /* caller guarantees there's a packet or more remaining
710 * - IN may end with a short packet (TSP set separately),
711 * - OUT is always full length
712 */
713 buf += req->req.actual;
714 dcmd -= req->req.actual;
715 ep->dma_fixup = 0;
716
717 /* no-descriptor mode can be simple for bulk-in, iso-in, iso-out */
718 DCSR(ep->dma) = DCSR_NODESC;
719 if (is_in) {
720 DSADR(ep->dma) = buf;
721 DTADR(ep->dma) = fifo;
722 if (dcmd > MAX_IN_DMA)
723 dcmd = MAX_IN_DMA;
724 else
725 ep->dma_fixup = (dcmd % ep->ep.maxpacket) != 0;
726 dcmd |= DCMD_BURST32 | DCMD_WIDTH1
727 | DCMD_FLOWTRG | DCMD_INCSRCADDR;
728 } else {
729#ifdef USE_OUT_DMA
730 DSADR(ep->dma) = fifo;
731 DTADR(ep->dma) = buf;
732 if (ep->bmAttributes != USB_ENDPOINT_XFER_ISOC)
733 dcmd = ep->ep.maxpacket;
734 dcmd |= DCMD_BURST32 | DCMD_WIDTH1
735 | DCMD_FLOWSRC | DCMD_INCTRGADDR;
736#endif
737 }
738 DCMD(ep->dma) = dcmd;
739 DCSR(ep->dma) = DCSR_RUN | DCSR_NODESC
740 | (unlikely(is_in)
741 ? DCSR_STOPIRQEN /* use dma_nodesc_handler() */
742 : 0); /* use handle_ep() */
743}
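/* In PXA DMA terms: WIDTH1/BURST32 move single bytes to or from the
 * 8-bit UDDR fifo register in 32-byte bursts, FLOWTRG/FLOWSRC let the
 * UDC pace the channel, and only the memory-side address increments.
 */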
744
745static void kick_dma(struct pxa2xx_ep *ep, struct pxa2xx_request *req)
746{
747 int is_in = ep->bEndpointAddress & USB_DIR_IN;
748
749 if (is_in) {
750 /* unaligned tx buffers and zlps only work with PIO */
751 if ((req->req.dma & 0x0f) != 0
752 || unlikely((req->req.length - req->req.actual)
753 == 0)) {
754 pio_irq_enable(ep->bEndpointAddress);
755 if ((*ep->reg_udccs & UDCCS_BI_TFS) != 0)
756 (void) write_fifo(ep, req);
757 } else {
758 start_dma_nodesc(ep, req, USB_DIR_IN);
759 }
760 } else {
761 if ((req->req.length - req->req.actual) < ep->ep.maxpacket) {
762 DMSG("%s short dma read...\n", ep->ep.name);
763 /* we're always set up for pio out */
764 read_fifo (ep, req);
765 } else {
766 *ep->reg_udccs = UDCCS_BO_DME
767 | (*ep->reg_udccs & UDCCS_BO_FST);
768 start_dma_nodesc(ep, req, USB_DIR_OUT);
769 }
770 }
771}
772
773static void cancel_dma(struct pxa2xx_ep *ep)
774{
775 struct pxa2xx_request *req;
776 u32 tmp;
777
778 if (DCSR(ep->dma) == 0 || list_empty(&ep->queue))
779 return;
780
781 DCSR(ep->dma) = 0;
782 while ((DCSR(ep->dma) & DCSR_STOPSTATE) == 0)
783 cpu_relax();
784
785 req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
786 tmp = DCMD(ep->dma) & DCMD_LENGTH;
787 req->req.actual = req->req.length - (tmp & DCMD_LENGTH);
788
789 /* the last tx packet may be incomplete, so flush the fifo.
790 * FIXME correct req.actual if we can
791 */
792 if (ep->bEndpointAddress & USB_DIR_IN)
793 *ep->reg_udccs = UDCCS_BI_FTF;
794}
795
796/* dma channel stopped ... normal tx end (IN), or on error (IN/OUT) */
797static void dma_nodesc_handler(int dmach, void *_ep, struct pt_regs *r)
798{
799 struct pxa2xx_ep *ep = _ep;
800 struct pxa2xx_request *req;
801 u32 tmp, completed;
802
803 local_irq_disable();
804
805 req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
806
807 ep->dma_irqs++;
808 ep->dev->stats.irqs++;
809 HEX_DISPLAY(ep->dev->stats.irqs);
810
811 /* ack/clear */
812 tmp = DCSR(ep->dma);
813 DCSR(ep->dma) = tmp;
814 if ((tmp & DCSR_STOPSTATE) == 0
815 || (DDADR(ep->dma) & DDADR_STOP) != 0) {
816 DBG(DBG_VERBOSE, "%s, dcsr %08x ddadr %08x\n",
817 ep->ep.name, DCSR(ep->dma), DDADR(ep->dma));
818 goto done;
819 }
820 DCSR(ep->dma) = 0; /* clear DCSR_STOPSTATE */
821
822 /* update transfer status */
823 completed = tmp & DCSR_BUSERR;
824 if (ep->bEndpointAddress & USB_DIR_IN)
825 tmp = DSADR(ep->dma);
826 else
827 tmp = DTADR(ep->dma);
828 req->req.actual = tmp - req->req.dma;
829
830 /* FIXME seems we sometimes see partial transfers... */
831
832 if (unlikely(completed != 0))
833 req->req.status = -EIO;
834 else if (req->req.actual) {
835 /* these registers have zeroes in low bits; they miscount
836 * some (end-of-transfer) short packets: tx 14 as tx 12
837 */
838 if (ep->dma_fixup)
839 req->req.actual = min(req->req.actual + 3,
840 req->req.length);
841
842 tmp = (req->req.length - req->req.actual);
843 completed = (tmp == 0);
844 if (completed && (ep->bEndpointAddress & USB_DIR_IN)) {
845
846 /* maybe validate final short packet ... */
847 if ((req->req.actual % ep->ep.maxpacket) != 0)
848 *ep->reg_udccs = UDCCS_BI_TSP/*|UDCCS_BI_TPC*/;
849
850 /* ... or zlp, using pio fallback */
851 else if (ep->bmAttributes == USB_ENDPOINT_XFER_BULK
852 && req->req.zero) {
853 DMSG("%s zlp terminate ...\n", ep->ep.name);
854 completed = 0;
855 }
856 }
857 }
858
859 if (likely(completed)) {
860 done(ep, req, 0);
861
862 /* maybe re-activate after completion */
863 if (ep->stopped || list_empty(&ep->queue))
864 goto done;
865 req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
866 }
867 kick_dma(ep, req);
868done:
869 local_irq_enable();
870}
871
872#endif
873
874/*-------------------------------------------------------------------------*/
875
876static int
877pxa2xx_ep_queue(struct usb_ep *_ep, struct usb_request *_req, int gfp_flags)
878{
879 struct pxa2xx_request *req;
880 struct pxa2xx_ep *ep;
881 struct pxa2xx_udc *dev;
882 unsigned long flags;
883
884 req = container_of(_req, struct pxa2xx_request, req);
885 if (unlikely (!_req || !_req->complete || !_req->buf
886 || !list_empty(&req->queue))) {
887 DMSG("%s, bad params\n", __FUNCTION__);
888 return -EINVAL;
889 }
890
891 ep = container_of(_ep, struct pxa2xx_ep, ep);
892 if (unlikely (!_ep || (!ep->desc && ep->ep.name != ep0name))) {
893 DMSG("%s, bad ep\n", __FUNCTION__);
894 return -EINVAL;
895 }
896
897 dev = ep->dev;
898 if (unlikely (!dev->driver
899 || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
900 DMSG("%s, bogus device state\n", __FUNCTION__);
901 return -ESHUTDOWN;
902 }
903
904 /* iso is always one packet per request, that's the only way
905 * we can report per-packet status. that also helps with dma.
906 */
907 if (unlikely (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
908 && req->req.length > le16_to_cpu
909 (ep->desc->wMaxPacketSize)))
910 return -EMSGSIZE;
911
912#ifdef USE_DMA
913 // FIXME caller may already have done the dma mapping
914 if (ep->dma >= 0) {
915 _req->dma = dma_map_single(dev->dev,
916 _req->buf, _req->length,
917 ((ep->bEndpointAddress & USB_DIR_IN) != 0)
918 ? DMA_TO_DEVICE
919 : DMA_FROM_DEVICE);
920 }
921#endif
922
923 DBG(DBG_NOISY, "%s queue req %p, len %d buf %p\n",
924 _ep->name, _req, _req->length, _req->buf);
925
926 local_irq_save(flags);
927
928 _req->status = -EINPROGRESS;
929 _req->actual = 0;
930
931 /* kickstart this i/o queue? */
932 if (list_empty(&ep->queue) && !ep->stopped) {
933 if (ep->desc == 0 /* ep0 */) {
934 unsigned length = _req->length;
935
936 switch (dev->ep0state) {
937 case EP0_IN_DATA_PHASE:
938 dev->stats.write.ops++;
939 if (write_ep0_fifo(ep, req))
940 req = NULL;
941 break;
942
943 case EP0_OUT_DATA_PHASE:
944 dev->stats.read.ops++;
945 /* messy ... */
946 if (dev->req_config) {
947 DBG(DBG_VERBOSE, "ep0 config ack%s\n",
948 dev->has_cfr ? "" : " raced");
949 if (dev->has_cfr)
950 UDCCFR = UDCCFR_AREN|UDCCFR_ACM
951 |UDCCFR_MB1;
952 done(ep, req, 0);
953 dev->ep0state = EP0_END_XFER;
954 local_irq_restore (flags);
955 return 0;
956 }
957 if (dev->req_pending)
958 ep0start(dev, UDCCS0_IPR, "OUT");
959 if (length == 0 || ((UDCCS0 & UDCCS0_RNE) != 0
960 && read_ep0_fifo(ep, req))) {
961 ep0_idle(dev);
962 done(ep, req, 0);
963 req = NULL;
964 }
965 break;
966
967 default:
968 DMSG("ep0 i/o, odd state %d\n", dev->ep0state);
969 local_irq_restore (flags);
970 return -EL2HLT;
971 }
972#ifdef USE_DMA
973 /* either start dma or prime pio pump */
974 } else if (ep->dma >= 0) {
975 kick_dma(ep, req);
976#endif
977 /* can the FIFO satisfy the request immediately? */
978 } else if ((ep->bEndpointAddress & USB_DIR_IN) != 0) {
979 if ((*ep->reg_udccs & UDCCS_BI_TFS) != 0
980 && write_fifo(ep, req))
981 req = NULL;
982 } else if ((*ep->reg_udccs & UDCCS_BO_RFS) != 0
983 && read_fifo(ep, req)) {
984 req = NULL;
985 }
986
987 if (likely (req && ep->desc) && ep->dma < 0)
988 pio_irq_enable(ep->bEndpointAddress);
989 }
990
991 /* pio or dma irq handler advances the queue. */
992 if (likely (req != 0))
993 list_add_tail(&req->queue, &ep->queue);
994 local_irq_restore(flags);
995
996 return 0;
997}
998
999
1000/*
1001 * nuke - dequeue ALL requests
1002 */
1003static void nuke(struct pxa2xx_ep *ep, int status)
1004{
1005 struct pxa2xx_request *req;
1006
1007 /* called with irqs blocked */
1008#ifdef USE_DMA
1009 if (ep->dma >= 0 && !ep->stopped)
1010 cancel_dma(ep);
1011#endif
1012 while (!list_empty(&ep->queue)) {
1013 req = list_entry(ep->queue.next,
1014 struct pxa2xx_request,
1015 queue);
1016 done(ep, req, status);
1017 }
1018 if (ep->desc)
1019 pio_irq_disable (ep->bEndpointAddress);
1020}
1021
1022
1023/* dequeue JUST ONE request */
1024static int pxa2xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1025{
1026 struct pxa2xx_ep *ep;
1027 struct pxa2xx_request *req;
1028 unsigned long flags;
1029
1030 ep = container_of(_ep, struct pxa2xx_ep, ep);
1031 if (!_ep || ep->ep.name == ep0name)
1032 return -EINVAL;
1033
1034 local_irq_save(flags);
1035
1036 /* make sure it's actually queued on this endpoint */
1037 list_for_each_entry (req, &ep->queue, queue) {
1038 if (&req->req == _req)
1039 break;
1040 }
1041 if (&req->req != _req) {
1042 local_irq_restore(flags);
1043 return -EINVAL;
1044 }
1045
1046#ifdef USE_DMA
1047 if (ep->dma >= 0 && ep->queue.next == &req->queue && !ep->stopped) {
1048 cancel_dma(ep);
1049 done(ep, req, -ECONNRESET);
1050 /* restart i/o */
1051 if (!list_empty(&ep->queue)) {
1052 req = list_entry(ep->queue.next,
1053 struct pxa2xx_request, queue);
1054 kick_dma(ep, req);
1055 }
1056 } else
1057#endif
1058 done(ep, req, -ECONNRESET);
1059
1060 local_irq_restore(flags);
1061 return 0;
1062}
1063
1064/*-------------------------------------------------------------------------*/
1065
1066static int pxa2xx_ep_set_halt(struct usb_ep *_ep, int value)
1067{
1068 struct pxa2xx_ep *ep;
1069 unsigned long flags;
1070
1071 ep = container_of(_ep, struct pxa2xx_ep, ep);
1072 if (unlikely (!_ep
1073 || (!ep->desc && ep->ep.name != ep0name))
1074 || ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
1075 DMSG("%s, bad ep\n", __FUNCTION__);
1076 return -EINVAL;
1077 }
1078 if (value == 0) {
1079 /* this path (reset toggle+halt) is needed to implement
1080 * SET_INTERFACE on normal hardware. but it can't be
1081 * done from software on the PXA UDC, and the hardware
1082 * forgets to do it as part of SET_INTERFACE automagic.
1083 */
1084 DMSG("only host can clear %s halt\n", _ep->name);
1085 return -EROFS;
1086 }
1087
1088 local_irq_save(flags);
1089
1090 if ((ep->bEndpointAddress & USB_DIR_IN) != 0
1091 && ((*ep->reg_udccs & UDCCS_BI_TFS) == 0
1092 || !list_empty(&ep->queue))) {
1093 local_irq_restore(flags);
1094 return -EAGAIN;
1095 }
1096
1097 /* FST bit is the same for control, bulk in, bulk out, interrupt in */
1098 *ep->reg_udccs = UDCCS_BI_FST|UDCCS_BI_FTF;
1099
1100 /* ep0 needs special care */
1101 if (!ep->desc) {
1102 start_watchdog(ep->dev);
1103 ep->dev->req_pending = 0;
1104 ep->dev->ep0state = EP0_STALL;
1105
1106 /* and bulk/intr endpoints like dropping stalls too */
1107 } else {
1108 unsigned i;
1109 for (i = 0; i < 1000; i += 20) {
1110 if (*ep->reg_udccs & UDCCS_BI_SST)
1111 break;
1112 udelay(20);
1113 }
1114 }
1115 local_irq_restore(flags);
1116
1117 DBG(DBG_VERBOSE, "%s halt\n", _ep->name);
1118 return 0;
1119}
1120
1121static int pxa2xx_ep_fifo_status(struct usb_ep *_ep)
1122{
1123 struct pxa2xx_ep *ep;
1124
1125 ep = container_of(_ep, struct pxa2xx_ep, ep);
1126 if (!_ep) {
1127 DMSG("%s, bad ep\n", __FUNCTION__);
1128 return -ENODEV;
1129 }
1130 /* pxa can't report unclaimed bytes from IN fifos */
1131 if ((ep->bEndpointAddress & USB_DIR_IN) != 0)
1132 return -EOPNOTSUPP;
1133 if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN
1134 || (*ep->reg_udccs & UDCCS_BO_RFS) == 0)
1135 return 0;
1136 else
1137 return (*ep->reg_ubcr & 0xfff) + 1;
1138}
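/* UBCR reports (bytes in the packet - 1), so a full 64-byte bulk OUT
 * packet reads back as 63 and the "+ 1" above returns 64.
 */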
1139
1140static void pxa2xx_ep_fifo_flush(struct usb_ep *_ep)
1141{
1142 struct pxa2xx_ep *ep;
1143
1144 ep = container_of(_ep, struct pxa2xx_ep, ep);
1145 if (!_ep || ep->ep.name == ep0name || !list_empty(&ep->queue)) {
1146 DMSG("%s, bad ep\n", __FUNCTION__);
1147 return;
1148 }
1149
1150 /* toggle and halt bits stay unchanged */
1151
1152 /* for OUT, just read and discard the FIFO contents. */
1153 if ((ep->bEndpointAddress & USB_DIR_IN) == 0) {
1154 while (((*ep->reg_udccs) & UDCCS_BO_RNE) != 0)
1155 (void) *ep->reg_uddr;
1156 return;
1157 }
1158
1159 /* most IN status is the same, but ISO can't stall */
1160 *ep->reg_udccs = UDCCS_BI_TPC|UDCCS_BI_FTF|UDCCS_BI_TUR
1161 | (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
1162 ? 0 : UDCCS_BI_SST);
1163}
1164
1165
1166static struct usb_ep_ops pxa2xx_ep_ops = {
1167 .enable = pxa2xx_ep_enable,
1168 .disable = pxa2xx_ep_disable,
1169
1170 .alloc_request = pxa2xx_ep_alloc_request,
1171 .free_request = pxa2xx_ep_free_request,
1172
1173 .alloc_buffer = pxa2xx_ep_alloc_buffer,
1174 .free_buffer = pxa2xx_ep_free_buffer,
1175
1176 .queue = pxa2xx_ep_queue,
1177 .dequeue = pxa2xx_ep_dequeue,
1178
1179 .set_halt = pxa2xx_ep_set_halt,
1180 .fifo_status = pxa2xx_ep_fifo_status,
1181 .fifo_flush = pxa2xx_ep_fifo_flush,
1182};
1183
1184
1185/* ---------------------------------------------------------------------------
1186 * device-scoped parts of the api to the usb controller hardware
1187 * ---------------------------------------------------------------------------
1188 */
1189
1190static int pxa2xx_udc_get_frame(struct usb_gadget *_gadget)
1191{
1192 return ((UFNRH & 0x07) << 8) | (UFNRL & 0xff);
1193}
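/* The USB frame number is 11 bits: UFNRH supplies the top three bits
 * and UFNRL the low eight.
 */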
1194
1195static int pxa2xx_udc_wakeup(struct usb_gadget *_gadget)
1196{
1197 /* host may not have enabled remote wakeup */
1198 if ((UDCCS0 & UDCCS0_DRWF) == 0)
1199 return -EHOSTUNREACH;
1200 udc_set_mask_UDCCR(UDCCR_RSM);
1201 return 0;
1202}
1203
1204static void stop_activity(struct pxa2xx_udc *, struct usb_gadget_driver *);
1205static void udc_enable (struct pxa2xx_udc *);
1206static void udc_disable(struct pxa2xx_udc *);
1207
1208/* We disable the UDC -- and its 48 MHz clock -- whenever it's not
1209 * in active use.
1210 */
1211static int pullup(struct pxa2xx_udc *udc, int is_active)
1212{
1213 is_active = is_active && udc->vbus && udc->pullup;
1214 DMSG("%s\n", is_active ? "active" : "inactive");
1215 if (is_active)
1216 udc_enable(udc);
1217 else {
1218 if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
1219 DMSG("disconnect %s\n", udc->driver
1220 ? udc->driver->driver.name
1221 : "(no driver)");
1222 stop_activity(udc, udc->driver);
1223 }
1224 udc_disable(udc);
1225 }
1226 return 0;
1227}
1228
1229/* VBUS reporting logically comes from a transceiver */
1230static int pxa2xx_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
1231{
1232 struct pxa2xx_udc *udc;
1233
1234 udc = container_of(_gadget, struct pxa2xx_udc, gadget);
1235 udc->vbus = is_active = (is_active != 0);
1236 DMSG("vbus %s\n", is_active ? "supplied" : "inactive");
1237 pullup(udc, is_active);
1238 return 0;
1239}
1240
1241/* drivers may have software control over D+ pullup */
1242static int pxa2xx_udc_pullup(struct usb_gadget *_gadget, int is_active)
1243{
1244 struct pxa2xx_udc *udc;
1245
1246 udc = container_of(_gadget, struct pxa2xx_udc, gadget);
1247
1248 /* not all boards support pullup control */
1249 if (!udc->mach->udc_command)
1250 return -EOPNOTSUPP;
1251
1252 is_active = (is_active != 0);
1253 udc->pullup = is_active;
1254 pullup(udc, is_active);
1255 return 0;
1256}
1257
1258static const struct usb_gadget_ops pxa2xx_udc_ops = {
1259 .get_frame = pxa2xx_udc_get_frame,
1260 .wakeup = pxa2xx_udc_wakeup,
1261 .vbus_session = pxa2xx_udc_vbus_session,
1262 .pullup = pxa2xx_udc_pullup,
1263
1264 // .vbus_draw ... boards may consume current from VBUS, up to
1265 // 100-500mA based on config. the 500uA suspend ceiling means
1266 // that exclusively vbus-powered PXA designs violate USB specs.
1267};
1268
1269/*-------------------------------------------------------------------------*/
1270
1271#ifdef CONFIG_USB_GADGET_DEBUG_FILES
1272
1273static const char proc_node_name [] = "driver/udc";
1274
1275static int
1276udc_proc_read(char *page, char **start, off_t off, int count,
1277 int *eof, void *_dev)
1278{
1279 char *buf = page;
1280 struct pxa2xx_udc *dev = _dev;
1281 char *next = buf;
1282 unsigned size = count;
1283 unsigned long flags;
1284 int i, t;
1285 u32 tmp;
1286
1287 if (off != 0)
1288 return 0;
1289
1290 local_irq_save(flags);
1291
1292 /* basic device status */
1293 t = scnprintf(next, size, DRIVER_DESC "\n"
1294 "%s version: %s\nGadget driver: %s\nHost %s\n\n",
1295 driver_name, DRIVER_VERSION SIZE_STR DMASTR,
1296 dev->driver ? dev->driver->driver.name : "(none)",
1297 is_vbus_present() ? "full speed" : "disconnected");
1298 size -= t;
1299 next += t;
1300
1301 /* registers for device and ep0 */
1302 t = scnprintf(next, size,
1303 "uicr %02X.%02X, usir %02X.%02x, ufnr %02X.%02X\n",
1304 UICR1, UICR0, USIR1, USIR0, UFNRH, UFNRL);
1305 size -= t;
1306 next += t;
1307
1308 tmp = UDCCR;
1309 t = scnprintf(next, size,
1310 "udccr %02X =%s%s%s%s%s%s%s%s\n", tmp,
1311 (tmp & UDCCR_REM) ? " rem" : "",
1312 (tmp & UDCCR_RSTIR) ? " rstir" : "",
1313 (tmp & UDCCR_SRM) ? " srm" : "",
1314 (tmp & UDCCR_SUSIR) ? " susir" : "",
1315 (tmp & UDCCR_RESIR) ? " resir" : "",
1316 (tmp & UDCCR_RSM) ? " rsm" : "",
1317 (tmp & UDCCR_UDA) ? " uda" : "",
1318 (tmp & UDCCR_UDE) ? " ude" : "");
1319 size -= t;
1320 next += t;
1321
1322 tmp = UDCCS0;
1323 t = scnprintf(next, size,
1324 "udccs0 %02X =%s%s%s%s%s%s%s%s\n", tmp,
1325 (tmp & UDCCS0_SA) ? " sa" : "",
1326 (tmp & UDCCS0_RNE) ? " rne" : "",
1327 (tmp & UDCCS0_FST) ? " fst" : "",
1328 (tmp & UDCCS0_SST) ? " sst" : "",
1329 (tmp & UDCCS0_DRWF) ? " drwf" : "",
1330 (tmp & UDCCS0_FTF) ? " ftf" : "",
1331 (tmp & UDCCS0_IPR) ? " ipr" : "",
1332 (tmp & UDCCS0_OPR) ? " opr" : "");
1333 size -= t;
1334 next += t;
1335
1336 if (dev->has_cfr) {
1337 tmp = UDCCFR;
1338 t = scnprintf(next, size,
1339 "udccfr %02X =%s%s\n", tmp,
1340 (tmp & UDCCFR_AREN) ? " aren" : "",
1341 (tmp & UDCCFR_ACM) ? " acm" : "");
1342 size -= t;
1343 next += t;
1344 }
1345
1346 if (!is_vbus_present() || !dev->driver)
1347 goto done;
1348
1349 t = scnprintf(next, size, "ep0 IN %lu/%lu, OUT %lu/%lu\nirqs %lu\n\n",
1350 dev->stats.write.bytes, dev->stats.write.ops,
1351 dev->stats.read.bytes, dev->stats.read.ops,
1352 dev->stats.irqs);
1353 size -= t;
1354 next += t;
1355
1356 /* dump endpoint queues */
1357 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
1358 struct pxa2xx_ep *ep = &dev->ep [i];
1359 struct pxa2xx_request *req;
1360 int t;
1361
1362 if (i != 0) {
1363 const struct usb_endpoint_descriptor *d;
1364
1365 d = ep->desc;
1366 if (!d)
1367 continue;
1368 tmp = *dev->ep [i].reg_udccs;
1369 t = scnprintf(next, size,
1370 "%s max %d %s udccs %02x irqs %lu/%lu\n",
1371 ep->ep.name, le16_to_cpu (d->wMaxPacketSize),
1372 (ep->dma >= 0) ? "dma" : "pio", tmp,
1373 ep->pio_irqs, ep->dma_irqs);
1374 /* TODO translate all five groups of udccs bits! */
1375
1376 } else /* ep0 should only have one transfer queued */
1377 t = scnprintf(next, size, "ep0 max 16 pio irqs %lu\n",
1378 ep->pio_irqs);
1379 if (t <= 0 || t > size)
1380 goto done;
1381 size -= t;
1382 next += t;
1383
1384 if (list_empty(&ep->queue)) {
1385 t = scnprintf(next, size, "\t(nothing queued)\n");
1386 if (t <= 0 || t > size)
1387 goto done;
1388 size -= t;
1389 next += t;
1390 continue;
1391 }
1392 list_for_each_entry(req, &ep->queue, queue) {
1393#ifdef USE_DMA
1394 if (ep->dma >= 0 && req->queue.prev == &ep->queue)
1395 t = scnprintf(next, size,
1396 "\treq %p len %d/%d "
1397 "buf %p (dma%d dcmd %08x)\n",
1398 &req->req, req->req.actual,
1399 req->req.length, req->req.buf,
1400 ep->dma, DCMD(ep->dma)
1401 // low 13 bits == bytes-to-go
1402 );
1403 else
1404#endif
1405 t = scnprintf(next, size,
1406 "\treq %p len %d/%d buf %p\n",
1407 &req->req, req->req.actual,
1408 req->req.length, req->req.buf);
1409 if (t <= 0 || t > size)
1410 goto done;
1411 size -= t;
1412 next += t;
1413 }
1414 }
1415
1416done:
1417 local_irq_restore(flags);
1418 *eof = 1;
1419 return count - size;
1420}
1421
1422#define create_proc_files() \
1423 create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev)
1424#define remove_proc_files() \
1425 remove_proc_entry(proc_node_name, NULL)
1426
1427#else /* !CONFIG_USB_GADGET_DEBUG_FILES */
1428
1429#define create_proc_files() do {} while (0)
1430#define remove_proc_files() do {} while (0)
1431
1432#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
1433
1434/* "function" sysfs attribute */
1435static ssize_t
1436show_function (struct device *_dev, struct device_attribute *attr, char *buf)
1437{
1438 struct pxa2xx_udc *dev = dev_get_drvdata (_dev);
1439
1440 if (!dev->driver
1441 || !dev->driver->function
1442 || strlen (dev->driver->function) > PAGE_SIZE)
1443 return 0;
1444 return scnprintf (buf, PAGE_SIZE, "%s\n", dev->driver->function);
1445}
1446static DEVICE_ATTR (function, S_IRUGO, show_function, NULL);
1447
1448/*-------------------------------------------------------------------------*/
1449
1450/*
1451 * udc_disable - disable USB device controller
1452 */
1453static void udc_disable(struct pxa2xx_udc *dev)
1454{
1455 /* block all irqs */
1456 udc_set_mask_UDCCR(UDCCR_SRM|UDCCR_REM);
1457 UICR0 = UICR1 = 0xff;
1458 UFNRH = UFNRH_SIM;
1459
1460 /* if hardware supports it, disconnect from usb */
1461 pullup_off();
1462
1463 udc_clear_mask_UDCCR(UDCCR_UDE);
1464
1465#ifdef CONFIG_ARCH_PXA
1466 /* Disable clock for USB device */
1467 pxa_set_cken(CKEN11_USB, 0);
1468#endif
1469
1470 ep0_idle (dev);
1471 dev->gadget.speed = USB_SPEED_UNKNOWN;
1472 LED_CONNECTED_OFF;
1473}
1474
1475
1476/*
1477 * udc_reinit - initialize software state
1478 */
1479static void udc_reinit(struct pxa2xx_udc *dev)
1480{
1481 u32 i;
1482
1483 /* device/ep0 records init */
1484 INIT_LIST_HEAD (&dev->gadget.ep_list);
1485 INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);
1486 dev->ep0state = EP0_IDLE;
1487
1488 /* basic endpoint records init */
1489 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
1490 struct pxa2xx_ep *ep = &dev->ep[i];
1491
1492 if (i != 0)
1493 list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
1494
1495 ep->desc = NULL;
1496 ep->stopped = 0;
1497 INIT_LIST_HEAD (&ep->queue);
1498 ep->pio_irqs = ep->dma_irqs = 0;
1499 }
1500
1501 /* the rest was statically initialized, and is read-only */
1502}
1503
1504/* until it's enabled, this UDC should be completely invisible
1505 * to any USB host.
1506 */
1507static void udc_enable (struct pxa2xx_udc *dev)
1508{
1509 udc_clear_mask_UDCCR(UDCCR_UDE);
1510
1511#ifdef CONFIG_ARCH_PXA
1512 /* Enable clock for USB device */
1513 pxa_set_cken(CKEN11_USB, 1);
1514 udelay(5);
1515#endif
1516
1517 /* try to clear these bits before we enable the udc */
1518 udc_ack_int_UDCCR(UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR);
1519
1520 ep0_idle(dev);
1521 dev->gadget.speed = USB_SPEED_UNKNOWN;
1522 dev->stats.irqs = 0;
1523
1524 /*
1525 * sequence taken from chapter 12.5.10, PXA250 AppProcDevManual:
1526 * - enable UDC
1527 * - if RESET is already in progress, ack interrupt
1528 * - unmask reset interrupt
1529 */
1530 udc_set_mask_UDCCR(UDCCR_UDE);
1531 if (!(UDCCR & UDCCR_UDA))
1532 udc_ack_int_UDCCR(UDCCR_RSTIR);
1533
1534 if (dev->has_cfr /* UDC_RES2 is defined */) {
1535 /* pxa255 (a0+) can avoid a set_config race that could
1536 * prevent gadget drivers from configuring correctly
1537 */
1538 UDCCFR = UDCCFR_ACM | UDCCFR_MB1;
1539 } else {
1540 /* "USB test mode" for pxa250 errata 40-42 (stepping a0, a1)
1541 * which could result in missing packets and interrupts.
1542 * supposedly one bit per endpoint, controlling whether it
1543 * double buffers or not; ACM/AREN bits fit into the holes.
1544 * zero bits (like USIR0_IRx) disable double buffering.
1545 */
1546 UDC_RES1 = 0x00;
1547 UDC_RES2 = 0x00;
1548 }
1549
1550#ifdef DISABLE_TEST_MODE
1551 /* "test mode" seems to have become the default in later chip
1552 * revs, preventing double buffering (and invalidating docs).
1553 * this EXPERIMENT enables it for bulk endpoints by tweaking
1554 * undefined/reserved register bits (that other drivers clear).
1555 * Belcarra code comments noted this usage.
1556 */
1557 if (fifo_mode & 1) { /* IN endpoints */
1558 UDC_RES1 |= USIR0_IR1|USIR0_IR6;
1559 UDC_RES2 |= USIR1_IR11;
1560 }
1561 if (fifo_mode & 2) { /* OUT endpoints */
1562 UDC_RES1 |= USIR0_IR2|USIR0_IR7;
1563 UDC_RES2 |= USIR1_IR12;
1564 }
1565#endif
1566
1567 /* enable suspend/resume and reset irqs */
1568 udc_clear_mask_UDCCR(UDCCR_SRM | UDCCR_REM);
1569
1570 /* enable ep0 irqs */
1571 UICR0 &= ~UICR0_IM0;
1572
1573 /* if hardware supports it, pullup D+ and wait for reset */
1574 pullup_on();
1575}
1576
1577
1578/* when a driver is successfully registered, it will receive
1579 * control requests including set_configuration(), which enables
1580 * non-control requests. then usb traffic follows until a
1581 * disconnect is reported. then a host may connect again, or
1582 * the driver might get unbound.
1583 */
1584int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1585{
1586 struct pxa2xx_udc *dev = the_controller;
1587 int retval;
1588
1589 if (!driver
1590 || driver->speed != USB_SPEED_FULL
1591 || !driver->bind
1592 || !driver->unbind
1593 || !driver->disconnect
1594 || !driver->setup)
1595 return -EINVAL;
1596 if (!dev)
1597 return -ENODEV;
1598 if (dev->driver)
1599 return -EBUSY;
1600
1601 /* first hook up the driver ... */
1602 dev->driver = driver;
1603 dev->gadget.dev.driver = &driver->driver;
1604 dev->pullup = 1;
1605
1606 device_add (&dev->gadget.dev);
1607 retval = driver->bind(&dev->gadget);
1608 if (retval) {
1609 DMSG("bind to driver %s --> error %d\n",
1610 driver->driver.name, retval);
1611 device_del (&dev->gadget.dev);
1612
1613 dev->driver = NULL;
1614 dev->gadget.dev.driver = NULL;
1615 return retval;
1616 }
1617 device_create_file(dev->dev, &dev_attr_function);
1618
1619 /* ... then enable host detection and ep0; and we're ready
1620 * for set_configuration as well as eventual disconnect.
1621 */
1622 DMSG("registered gadget driver '%s'\n", driver->driver.name);
1623 pullup(dev, 1);
1624 dump_state(dev);
1625 return 0;
1626}
1627EXPORT_SYMBOL(usb_gadget_register_driver);
1628
1629static void
1630stop_activity(struct pxa2xx_udc *dev, struct usb_gadget_driver *driver)
1631{
1632 int i;
1633
1634 /* don't disconnect drivers more than once */
1635 if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1636 driver = NULL;
1637 dev->gadget.speed = USB_SPEED_UNKNOWN;
1638
1639 /* prevent new request submissions, kill any outstanding requests */
1640 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
1641 struct pxa2xx_ep *ep = &dev->ep[i];
1642
1643 ep->stopped = 1;
1644 nuke(ep, -ESHUTDOWN);
1645 }
1646 del_timer_sync(&dev->timer);
1647
1648 /* report disconnect; the driver is already quiesced */
1649 LED_CONNECTED_OFF;
1650 if (driver)
1651 driver->disconnect(&dev->gadget);
1652
1653 /* re-init driver-visible data structures */
1654 udc_reinit(dev);
1655}
1656
1657int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
1658{
1659 struct pxa2xx_udc *dev = the_controller;
1660
1661 if (!dev)
1662 return -ENODEV;
1663 if (!driver || driver != dev->driver)
1664 return -EINVAL;
1665
1666 local_irq_disable();
1667 pullup(dev, 0);
1668 stop_activity(dev, driver);
1669 local_irq_enable();
1670
1671 driver->unbind(&dev->gadget);
1672 dev->driver = NULL;
1673
1674 device_del (&dev->gadget.dev);
1675 device_remove_file(dev->dev, &dev_attr_function);
1676
1677 DMSG("unregistered gadget driver '%s'\n", driver->driver.name);
1678 dump_state(dev);
1679 return 0;
1680}
1681EXPORT_SYMBOL(usb_gadget_unregister_driver);
1682
1683
1684/*-------------------------------------------------------------------------*/
1685
1686#ifdef CONFIG_ARCH_LUBBOCK
1687
1688/* Lubbock has separate connect and disconnect irqs. More typical designs
1689 * use one GPIO as the VBUS IRQ, and another to control the D+ pullup.
1690 */
1691
1692static irqreturn_t
1693lubbock_vbus_irq(int irq, void *_dev, struct pt_regs *r)
1694{
1695 struct pxa2xx_udc *dev = _dev;
1696 int vbus;
1697
1698 dev->stats.irqs++;
1699 HEX_DISPLAY(dev->stats.irqs);
1700 switch (irq) {
1701 case LUBBOCK_USB_IRQ:
1702 LED_CONNECTED_ON;
1703 vbus = 1;
1704 disable_irq(LUBBOCK_USB_IRQ);
1705 enable_irq(LUBBOCK_USB_DISC_IRQ);
1706 break;
1707 case LUBBOCK_USB_DISC_IRQ:
1708 LED_CONNECTED_OFF;
1709 vbus = 0;
1710 disable_irq(LUBBOCK_USB_DISC_IRQ);
1711 enable_irq(LUBBOCK_USB_IRQ);
1712 break;
1713 default:
1714 return IRQ_NONE;
1715 }
1716
1717 pxa2xx_udc_vbus_session(&dev->gadget, vbus);
1718 return IRQ_HANDLED;
1719}
1720
1721#endif
1722
1723
1724/*-------------------------------------------------------------------------*/
1725
1726static inline void clear_ep_state (struct pxa2xx_udc *dev)
1727{
1728 unsigned i;
1729
1730 /* hardware SET_{CONFIGURATION,INTERFACE} automagic resets endpoint
1731 * fifos, and pending transactions mustn't be continued in any case.
1732 */
1733 for (i = 1; i < PXA_UDC_NUM_ENDPOINTS; i++)
1734 nuke(&dev->ep[i], -ECONNABORTED);
1735}
1736
1737static void udc_watchdog(unsigned long _dev)
1738{
1739 struct pxa2xx_udc *dev = (void *)_dev;
1740
1741 local_irq_disable();
1742 if (dev->ep0state == EP0_STALL
1743 && (UDCCS0 & UDCCS0_FST) == 0
1744 && (UDCCS0 & UDCCS0_SST) == 0) {
1745 UDCCS0 = UDCCS0_FST|UDCCS0_FTF;
1746 DBG(DBG_VERBOSE, "ep0 re-stall\n");
1747 start_watchdog(dev);
1748 }
1749 local_irq_enable();
1750}
1751
1752static void handle_ep0 (struct pxa2xx_udc *dev)
1753{
1754 u32 udccs0 = UDCCS0;
1755 struct pxa2xx_ep *ep = &dev->ep [0];
1756 struct pxa2xx_request *req;
1757 union {
1758 struct usb_ctrlrequest r;
1759 u8 raw [8];
1760 u32 word [2];
1761 } u;
1762
1763 if (list_empty(&ep->queue))
1764 req = NULL;
1765 else
1766 req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
1767
1768 /* clear stall status */
1769 if (udccs0 & UDCCS0_SST) {
1770 nuke(ep, -EPIPE);
1771 UDCCS0 = UDCCS0_SST;
1772 del_timer(&dev->timer);
1773 ep0_idle(dev);
1774 }
1775
1776 /* previous request unfinished? non-error iff back-to-back ... */
1777 if ((udccs0 & UDCCS0_SA) != 0 && dev->ep0state != EP0_IDLE) {
1778 nuke(ep, 0);
1779 del_timer(&dev->timer);
1780 ep0_idle(dev);
1781 }
1782
1783 switch (dev->ep0state) {
1784 case EP0_IDLE:
1785 /* late-breaking status? */
1786 udccs0 = UDCCS0;
1787
1788 /* start control request? */
1789 if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))
1790 == (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))) {
1791 int i;
1792
1793 nuke (ep, -EPROTO);
1794
1795 /* read SETUP packet */
1796 for (i = 0; i < 8; i++) {
1797 if (unlikely(!(UDCCS0 & UDCCS0_RNE))) {
1798bad_setup:
1799 DMSG("SETUP %d!\n", i);
1800 goto stall;
1801 }
1802 u.raw [i] = (u8) UDDR0;
1803 }
1804 if (unlikely((UDCCS0 & UDCCS0_RNE) != 0))
1805 goto bad_setup;
1806
1807got_setup:
1808 DBG(DBG_VERBOSE, "SETUP %02x.%02x v%04x i%04x l%04x\n",
1809 u.r.bRequestType, u.r.bRequest,
1810 le16_to_cpu(u.r.wValue),
1811 le16_to_cpu(u.r.wIndex),
1812 le16_to_cpu(u.r.wLength));
1813
1814 /* cope with automagic for some standard requests. */
1815 dev->req_std = (u.r.bRequestType & USB_TYPE_MASK)
1816 == USB_TYPE_STANDARD;
1817 dev->req_config = 0;
1818 dev->req_pending = 1;
1819 switch (u.r.bRequest) {
1820 /* hardware restricts gadget drivers here! */
1821 case USB_REQ_SET_CONFIGURATION:
1822 if (u.r.bRequestType == USB_RECIP_DEVICE) {
1823 /* reflect hardware's automagic
1824 * up to the gadget driver.
1825 */
1826config_change:
1827 dev->req_config = 1;
1828 clear_ep_state(dev);
1829 /* if !has_cfr, there's no synch
1830 * else use AREN (later) not SA|OPR
1831 * USIR0_IR0 acts edge sensitive
1832 */
1833 }
1834 break;
1835 /* ... and here, even more ... */
1836 case USB_REQ_SET_INTERFACE:
1837 if (u.r.bRequestType == USB_RECIP_INTERFACE) {
1838 /* udc hardware is broken by design:
1839 * - altsetting may only be zero;
1840 * - hw resets all interfaces' eps;
1841 * - ep reset doesn't include halt(?).
1842 */
1843 DMSG("broken set_interface (%d/%d)\n",
1844 le16_to_cpu(u.r.wIndex),
1845 le16_to_cpu(u.r.wValue));
1846 goto config_change;
1847 }
1848 break;
1849 /* hardware was supposed to hide this */
1850 case USB_REQ_SET_ADDRESS:
1851 if (u.r.bRequestType == USB_RECIP_DEVICE) {
1852 ep0start(dev, 0, "address");
1853 return;
1854 }
1855 break;
1856 }
1857
1858 if (u.r.bRequestType & USB_DIR_IN)
1859 dev->ep0state = EP0_IN_DATA_PHASE;
1860 else
1861 dev->ep0state = EP0_OUT_DATA_PHASE;
1862
1863 i = dev->driver->setup(&dev->gadget, &u.r);
1864 if (i < 0) {
1865 /* hardware automagic preventing STALL... */
1866 if (dev->req_config) {
1867 /* hardware sometimes neglects to tell
1868 * us about config change events,
1869 * so later ones may fail...
1870 */
1871 WARN("config change %02x fail %d?\n",
1872 u.r.bRequest, i);
1873 return;
1874 /* TODO experiment: if has_cfr,
1875 * hardware didn't ACK; maybe we
1876 * could actually STALL!
1877 */
1878 }
1879 DBG(DBG_VERBOSE, "protocol STALL, "
1880 "%02x err %d\n", UDCCS0, i);
1881stall:
1882 /* the watchdog timer helps deal with cases
1883 * where udc seems to clear FST wrongly, and
1884 * then NAKs instead of STALLing.
1885 */
1886 ep0start(dev, UDCCS0_FST|UDCCS0_FTF, "stall");
1887 start_watchdog(dev);
1888 dev->ep0state = EP0_STALL;
1889
1890 /* deferred i/o == no response yet */
1891 } else if (dev->req_pending) {
1892 if (likely(dev->ep0state == EP0_IN_DATA_PHASE
1893 || dev->req_std || u.r.wLength))
1894 ep0start(dev, 0, "defer");
1895 else
1896 ep0start(dev, UDCCS0_IPR, "defer/IPR");
1897 }
1898
1899 /* expect at least one data or status stage irq */
1900 return;
1901
1902 } else if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA))
1903 == (UDCCS0_OPR|UDCCS0_SA))) {
1904 unsigned i;
1905
1906 /* pxa210/250 erratum 131 for B0/B1 says RNE lies.
1907 * still observed on a pxa255 a0.
1908 */
1909 DBG(DBG_VERBOSE, "e131\n");
1910 nuke(ep, -EPROTO);
1911
1912 /* read SETUP data, but don't trust it too much */
1913 for (i = 0; i < 8; i++)
1914 u.raw [i] = (u8) UDDR0;
1915 if ((u.r.bRequestType & USB_RECIP_MASK)
1916 > USB_RECIP_OTHER)
1917 goto stall;
1918 if (u.word [0] == 0 && u.word [1] == 0)
1919 goto stall;
1920 goto got_setup;
1921 } else {
1922 /* some random early IRQ:
1923 * - we acked FST
1924 * - IPR cleared
1925 * - OPR got set, without SA (likely status stage)
1926 */
1927 UDCCS0 = udccs0 & (UDCCS0_SA|UDCCS0_OPR);
1928 }
1929 break;
1930 case EP0_IN_DATA_PHASE: /* GET_DESCRIPTOR etc */
1931 if (udccs0 & UDCCS0_OPR) {
1932 UDCCS0 = UDCCS0_OPR|UDCCS0_FTF;
1933 DBG(DBG_VERBOSE, "ep0in premature status\n");
1934 if (req)
1935 done(ep, req, 0);
1936 ep0_idle(dev);
1937 } else /* irq was IPR clearing */ {
1938 if (req) {
1939 /* this IN packet might finish the request */
1940 (void) write_ep0_fifo(ep, req);
1941 } /* else IN token before response was written */
1942 }
1943 break;
1944 case EP0_OUT_DATA_PHASE: /* SET_DESCRIPTOR etc */
1945 if (udccs0 & UDCCS0_OPR) {
1946 if (req) {
1947 /* this OUT packet might finish the request */
1948 if (read_ep0_fifo(ep, req))
1949 done(ep, req, 0);
1950 /* else more OUT packets expected */
1951 } /* else OUT token before read was issued */
1952 } else /* irq was IPR clearing */ {
1953 DBG(DBG_VERBOSE, "ep0out premature status\n");
1954 if (req)
1955 done(ep, req, 0);
1956 ep0_idle(dev);
1957 }
1958 break;
1959 case EP0_END_XFER:
1960 if (req)
1961 done(ep, req, 0);
1962 /* ack control-IN status (maybe in-zlp was skipped)
1963 * also appears after some config change events.
1964 */
1965 if (udccs0 & UDCCS0_OPR)
1966 UDCCS0 = UDCCS0_OPR;
1967 ep0_idle(dev);
1968 break;
1969 case EP0_STALL:
1970 UDCCS0 = UDCCS0_FST;
1971 break;
1972 }
1973 USIR0 = USIR0_IR0;
1974}
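/* For reference, the eight SETUP bytes read from UDDR0 map directly onto
 * struct usb_ctrlrequest.  For example, a standard 64-byte
 * GET_DESCRIPTOR(DEVICE) request arrives on the wire as
 *
 *	80 06 00 01 00 00 40 00
 *
 * and decodes (wValue/wIndex/wLength are little-endian, hence the
 * le16_to_cpu() calls above) as
 *
 *	bRequestType	0x80	device-to-host, standard, to device
 *	bRequest	0x06	USB_REQ_GET_DESCRIPTOR
 *	wValue		0x0100	USB_DT_DEVICE << 8, descriptor index 0
 *	wIndex		0x0000
 *	wLength		0x0040	up to 64 bytes wanted
 */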
1975
1976static void handle_ep(struct pxa2xx_ep *ep)
1977{
1978 struct pxa2xx_request *req;
1979 int is_in = ep->bEndpointAddress & USB_DIR_IN;
1980 int completed;
1981 u32 udccs, tmp;
1982
1983 do {
1984 completed = 0;
1985 if (likely (!list_empty(&ep->queue)))
1986 req = list_entry(ep->queue.next,
1987 struct pxa2xx_request, queue);
1988 else
1989 req = NULL;
1990
1991 // TODO check FST handling
1992
1993 udccs = *ep->reg_udccs;
1994 if (unlikely(is_in)) { /* irq from TPC, SST, or (ISO) TUR */
1995 tmp = UDCCS_BI_TUR;
1996 if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
1997 tmp |= UDCCS_BI_SST;
1998 tmp &= udccs;
1999 if (likely (tmp))
2000 *ep->reg_udccs = tmp;
2001 if (req && likely ((udccs & UDCCS_BI_TFS) != 0))
2002 completed = write_fifo(ep, req);
2003
2004 } else { /* irq from RPC (or for ISO, ROF) */
2005 if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
2006 tmp = UDCCS_BO_SST | UDCCS_BO_DME;
2007 else
2008 tmp = UDCCS_IO_ROF | UDCCS_IO_DME;
2009 tmp &= udccs;
2010 if (likely(tmp))
2011 *ep->reg_udccs = tmp;
2012
2013 /* fifos can hold packets, ready for reading... */
2014 if (likely(req)) {
2015#ifdef USE_OUT_DMA
2016// TODO out-dma hasn't been debugged yet.  this approach assumes
2017// the worst about short packets and RPC; it could probably do better.
2018
2019 if (likely(ep->dma >= 0)) {
2020 if (!(udccs & UDCCS_BO_RSP)) {
2021 *ep->reg_udccs = UDCCS_BO_RPC;
2022 ep->dma_irqs++;
2023 return;
2024 }
2025 }
2026#endif
2027 completed = read_fifo(ep, req);
2028 } else
2029 pio_irq_disable (ep->bEndpointAddress);
2030 }
2031 ep->pio_irqs++;
2032 } while (completed);
2033}
2034
2035/*
2036 * pxa2xx_udc_irq - interrupt handler
2037 *
2038 * avoid delays in ep0 processing. the control handshaking isn't always
2039 * under software control (pxa250 c0 and the pxa255 are better), and delays
2040 * could cause usb protocol errors.
2041 */
2042static irqreturn_t
2043pxa2xx_udc_irq(int irq, void *_dev, struct pt_regs *r)
2044{
2045 struct pxa2xx_udc *dev = _dev;
2046 int handled;
2047
2048 dev->stats.irqs++;
2049 HEX_DISPLAY(dev->stats.irqs);
2050 do {
2051 u32 udccr = UDCCR;
2052
2053 handled = 0;
2054
2055 /* SUSpend Interrupt Request */
2056 if (unlikely(udccr & UDCCR_SUSIR)) {
2057 udc_ack_int_UDCCR(UDCCR_SUSIR);
2058 handled = 1;
2059			DBG(DBG_VERBOSE, "USB suspend%s\n", is_vbus_present()
2060				? "" : "+disconnect");
2061
2062			if (!is_vbus_present())
2063				stop_activity(dev, dev->driver);
2064 else if (dev->gadget.speed != USB_SPEED_UNKNOWN
2065 && dev->driver
2066 && dev->driver->suspend)
2067 dev->driver->suspend(&dev->gadget);
2068 ep0_idle (dev);
2069 }
2070
2071 /* RESume Interrupt Request */
2072 if (unlikely(udccr & UDCCR_RESIR)) {
2073 udc_ack_int_UDCCR(UDCCR_RESIR);
2074 handled = 1;
2075 DBG(DBG_VERBOSE, "USB resume\n");
2076
2077 if (dev->gadget.speed != USB_SPEED_UNKNOWN
2078 && dev->driver
2079 && dev->driver->resume
2080					&& is_vbus_present())
2081				dev->driver->resume(&dev->gadget);
2082 }
2083
2084 /* ReSeT Interrupt Request - USB reset */
2085 if (unlikely(udccr & UDCCR_RSTIR)) {
2086 udc_ack_int_UDCCR(UDCCR_RSTIR);
2087 handled = 1;
2088
2089 if ((UDCCR & UDCCR_UDA) == 0) {
2090 DBG(DBG_VERBOSE, "USB reset start\n");
2091
2092 /* reset driver and endpoints,
2093 * in case that's not yet done
2094 */
2095 stop_activity (dev, dev->driver);
2096
2097 } else {
2098 DBG(DBG_VERBOSE, "USB reset end\n");
2099 dev->gadget.speed = USB_SPEED_FULL;
2100 LED_CONNECTED_ON;
2101 memset(&dev->stats, 0, sizeof dev->stats);
2102 /* driver and endpoints are still reset */
2103 }
2104
2105 } else {
2106 u32 usir0 = USIR0 & ~UICR0;
2107 u32 usir1 = USIR1 & ~UICR1;
2108 int i;
2109
2110 if (unlikely (!usir0 && !usir1))
2111 continue;
2112
2113 DBG(DBG_VERY_NOISY, "irq %02x.%02x\n", usir1, usir0);
2114
2115 /* control traffic */
2116 if (usir0 & USIR0_IR0) {
2117 dev->ep[0].pio_irqs++;
2118 handle_ep0(dev);
2119 handled = 1;
2120 }
2121
2122 /* endpoint data transfers */
2123 for (i = 0; i < 8; i++) {
2124 u32 tmp = 1 << i;
2125
2126 if (i && (usir0 & tmp)) {
2127 handle_ep(&dev->ep[i]);
2128 USIR0 |= tmp;
2129 handled = 1;
2130 }
2131 if (usir1 & tmp) {
2132 handle_ep(&dev->ep[i+8]);
2133 USIR1 |= tmp;
2134 handled = 1;
2135 }
2136 }
2137 }
2138
2139 /* we could also ask for 1 msec SOF (SIR) interrupts */
2140
2141 } while (handled);
2142 return IRQ_HANDLED;
2143}
2144
2145/*-------------------------------------------------------------------------*/
2146
2147static void nop_release (struct device *dev)
2148{
2149 DMSG("%s %s\n", __FUNCTION__, dev->bus_id);
2150}
2151
2152/* this uses load-time allocation and initialization (instead of
2153 * doing it at run-time) to save code, eliminate fault paths, and
2154 * be more obviously correct.
2155 */
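/* The run-time alternative alluded to above would look roughly like the
 * following sketch in probe() (not used here; shown only for contrast,
 * with most field setup and error handling omitted):
 *
 *	dev = kmalloc(sizeof *dev, GFP_KERNEL);
 *	if (!dev)
 *		return -ENOMEM;
 *	memset(dev, 0, sizeof *dev);
 *	dev->gadget.ops = &pxa2xx_udc_ops;
 *	dev->gadget.ep0 = &dev->ep[0].ep;
 *
 * ... plus a matching kfree() on every failure path and in remove().
 */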
2156static struct pxa2xx_udc memory = {
2157 .gadget = {
2158 .ops = &pxa2xx_udc_ops,
2159 .ep0 = &memory.ep[0].ep,
2160 .name = driver_name,
2161 .dev = {
2162 .bus_id = "gadget",
2163 .release = nop_release,
2164 },
2165 },
2166
2167 /* control endpoint */
2168 .ep[0] = {
2169 .ep = {
2170 .name = ep0name,
2171 .ops = &pxa2xx_ep_ops,
2172 .maxpacket = EP0_FIFO_SIZE,
2173 },
2174 .dev = &memory,
2175 .reg_udccs = &UDCCS0,
2176 .reg_uddr = &UDDR0,
2177 },
2178
2179 /* first group of endpoints */
2180 .ep[1] = {
2181 .ep = {
2182 .name = "ep1in-bulk",
2183 .ops = &pxa2xx_ep_ops,
2184 .maxpacket = BULK_FIFO_SIZE,
2185 },
2186 .dev = &memory,
2187 .fifo_size = BULK_FIFO_SIZE,
2188 .bEndpointAddress = USB_DIR_IN | 1,
2189 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2190 .reg_udccs = &UDCCS1,
2191 .reg_uddr = &UDDR1,
2192 drcmr (25)
2193 },
2194 .ep[2] = {
2195 .ep = {
2196 .name = "ep2out-bulk",
2197 .ops = &pxa2xx_ep_ops,
2198 .maxpacket = BULK_FIFO_SIZE,
2199 },
2200 .dev = &memory,
2201 .fifo_size = BULK_FIFO_SIZE,
2202 .bEndpointAddress = 2,
2203 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2204 .reg_udccs = &UDCCS2,
2205 .reg_ubcr = &UBCR2,
2206 .reg_uddr = &UDDR2,
2207 drcmr (26)
2208 },
2209#ifndef CONFIG_USB_PXA2XX_SMALL
2210 .ep[3] = {
2211 .ep = {
2212 .name = "ep3in-iso",
2213 .ops = &pxa2xx_ep_ops,
2214 .maxpacket = ISO_FIFO_SIZE,
2215 },
2216 .dev = &memory,
2217 .fifo_size = ISO_FIFO_SIZE,
2218 .bEndpointAddress = USB_DIR_IN | 3,
2219 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2220 .reg_udccs = &UDCCS3,
2221 .reg_uddr = &UDDR3,
2222 drcmr (27)
2223 },
2224 .ep[4] = {
2225 .ep = {
2226 .name = "ep4out-iso",
2227 .ops = &pxa2xx_ep_ops,
2228 .maxpacket = ISO_FIFO_SIZE,
2229 },
2230 .dev = &memory,
2231 .fifo_size = ISO_FIFO_SIZE,
2232 .bEndpointAddress = 4,
2233 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2234 .reg_udccs = &UDCCS4,
2235 .reg_ubcr = &UBCR4,
2236 .reg_uddr = &UDDR4,
2237 drcmr (28)
2238 },
2239 .ep[5] = {
2240 .ep = {
2241 .name = "ep5in-int",
2242 .ops = &pxa2xx_ep_ops,
2243 .maxpacket = INT_FIFO_SIZE,
2244 },
2245 .dev = &memory,
2246 .fifo_size = INT_FIFO_SIZE,
2247 .bEndpointAddress = USB_DIR_IN | 5,
2248 .bmAttributes = USB_ENDPOINT_XFER_INT,
2249 .reg_udccs = &UDCCS5,
2250 .reg_uddr = &UDDR5,
2251 },
2252
2253 /* second group of endpoints */
2254 .ep[6] = {
2255 .ep = {
2256 .name = "ep6in-bulk",
2257 .ops = &pxa2xx_ep_ops,
2258 .maxpacket = BULK_FIFO_SIZE,
2259 },
2260 .dev = &memory,
2261 .fifo_size = BULK_FIFO_SIZE,
2262 .bEndpointAddress = USB_DIR_IN | 6,
2263 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2264 .reg_udccs = &UDCCS6,
2265 .reg_uddr = &UDDR6,
2266 drcmr (30)
2267 },
2268 .ep[7] = {
2269 .ep = {
2270 .name = "ep7out-bulk",
2271 .ops = &pxa2xx_ep_ops,
2272 .maxpacket = BULK_FIFO_SIZE,
2273 },
2274 .dev = &memory,
2275 .fifo_size = BULK_FIFO_SIZE,
2276 .bEndpointAddress = 7,
2277 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2278 .reg_udccs = &UDCCS7,
2279 .reg_ubcr = &UBCR7,
2280 .reg_uddr = &UDDR7,
2281 drcmr (31)
2282 },
2283 .ep[8] = {
2284 .ep = {
2285 .name = "ep8in-iso",
2286 .ops = &pxa2xx_ep_ops,
2287 .maxpacket = ISO_FIFO_SIZE,
2288 },
2289 .dev = &memory,
2290 .fifo_size = ISO_FIFO_SIZE,
2291 .bEndpointAddress = USB_DIR_IN | 8,
2292 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2293 .reg_udccs = &UDCCS8,
2294 .reg_uddr = &UDDR8,
2295 drcmr (32)
2296 },
2297 .ep[9] = {
2298 .ep = {
2299 .name = "ep9out-iso",
2300 .ops = &pxa2xx_ep_ops,
2301 .maxpacket = ISO_FIFO_SIZE,
2302 },
2303 .dev = &memory,
2304 .fifo_size = ISO_FIFO_SIZE,
2305 .bEndpointAddress = 9,
2306 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2307 .reg_udccs = &UDCCS9,
2308 .reg_ubcr = &UBCR9,
2309 .reg_uddr = &UDDR9,
2310 drcmr (33)
2311 },
2312 .ep[10] = {
2313 .ep = {
2314 .name = "ep10in-int",
2315 .ops = &pxa2xx_ep_ops,
2316 .maxpacket = INT_FIFO_SIZE,
2317 },
2318 .dev = &memory,
2319 .fifo_size = INT_FIFO_SIZE,
2320 .bEndpointAddress = USB_DIR_IN | 10,
2321 .bmAttributes = USB_ENDPOINT_XFER_INT,
2322 .reg_udccs = &UDCCS10,
2323 .reg_uddr = &UDDR10,
2324 },
2325
2326 /* third group of endpoints */
2327 .ep[11] = {
2328 .ep = {
2329 .name = "ep11in-bulk",
2330 .ops = &pxa2xx_ep_ops,
2331 .maxpacket = BULK_FIFO_SIZE,
2332 },
2333 .dev = &memory,
2334 .fifo_size = BULK_FIFO_SIZE,
2335 .bEndpointAddress = USB_DIR_IN | 11,
2336 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2337 .reg_udccs = &UDCCS11,
2338 .reg_uddr = &UDDR11,
2339 drcmr (35)
2340 },
2341 .ep[12] = {
2342 .ep = {
2343 .name = "ep12out-bulk",
2344 .ops = &pxa2xx_ep_ops,
2345 .maxpacket = BULK_FIFO_SIZE,
2346 },
2347 .dev = &memory,
2348 .fifo_size = BULK_FIFO_SIZE,
2349 .bEndpointAddress = 12,
2350 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2351 .reg_udccs = &UDCCS12,
2352 .reg_ubcr = &UBCR12,
2353 .reg_uddr = &UDDR12,
2354 drcmr (36)
2355 },
2356 .ep[13] = {
2357 .ep = {
2358 .name = "ep13in-iso",
2359 .ops = &pxa2xx_ep_ops,
2360 .maxpacket = ISO_FIFO_SIZE,
2361 },
2362 .dev = &memory,
2363 .fifo_size = ISO_FIFO_SIZE,
2364 .bEndpointAddress = USB_DIR_IN | 13,
2365 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2366 .reg_udccs = &UDCCS13,
2367 .reg_uddr = &UDDR13,
2368 drcmr (37)
2369 },
2370 .ep[14] = {
2371 .ep = {
2372 .name = "ep14out-iso",
2373 .ops = &pxa2xx_ep_ops,
2374 .maxpacket = ISO_FIFO_SIZE,
2375 },
2376 .dev = &memory,
2377 .fifo_size = ISO_FIFO_SIZE,
2378 .bEndpointAddress = 14,
2379 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2380 .reg_udccs = &UDCCS14,
2381 .reg_ubcr = &UBCR14,
2382 .reg_uddr = &UDDR14,
2383 drcmr (38)
2384 },
2385 .ep[15] = {
2386 .ep = {
2387 .name = "ep15in-int",
2388 .ops = &pxa2xx_ep_ops,
2389 .maxpacket = INT_FIFO_SIZE,
2390 },
2391 .dev = &memory,
2392 .fifo_size = INT_FIFO_SIZE,
2393 .bEndpointAddress = USB_DIR_IN | 15,
2394 .bmAttributes = USB_ENDPOINT_XFER_INT,
2395 .reg_udccs = &UDCCS15,
2396 .reg_uddr = &UDDR15,
2397 },
2398#endif /* !CONFIG_USB_PXA2XX_SMALL */
2399};
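/* A gadget driver sees the endpoints above on gadget->ep_list and claims
 * one by name before enabling it with its own descriptor.  A minimal
 * sketch (the descriptor "fs_bulk_in_desc" and any missing-endpoint
 * handling belong to that gadget driver, not to this file):
 *
 *	struct usb_ep	*ep = NULL, *tmp;
 *
 *	list_for_each_entry (tmp, &gadget->ep_list, ep_list) {
 *		if (strcmp (tmp->name, "ep1in-bulk") == 0) {
 *			ep = tmp;
 *			break;
 *		}
 *	}
 *	if (ep)
 *		status = usb_ep_enable (ep, &fs_bulk_in_desc);
 */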
2400
2401#define CP15R0_VENDOR_MASK 0xffffe000
2402
2403#if defined(CONFIG_ARCH_PXA)
2404#define CP15R0_XSCALE_VALUE 0x69052000 /* intel/arm/xscale */
2405
2406#elif defined(CONFIG_ARCH_IXP4XX)
2407#define CP15R0_XSCALE_VALUE 0x69054000 /* intel/arm/ixp4xx */
2408
2409#endif
2410
2411#define CP15R0_PROD_MASK 0x000003f0
2412#define PXA25x 0x00000100 /* and PXA26x */
2413#define PXA210 0x00000120
2414
2415#define CP15R0_REV_MASK 0x0000000f
2416
2417#define CP15R0_PRODREV_MASK (CP15R0_PROD_MASK | CP15R0_REV_MASK)
2418
2419#define PXA255_A0 0x00000106 /* or PXA260_B1 */
2420#define PXA250_C0 0x00000105 /* or PXA26x_B0 */
2421#define PXA250_B2 0x00000104
2422#define PXA250_B1 0x00000103 /* or PXA260_A0 */
2423#define PXA250_B0 0x00000102
2424#define PXA250_A1 0x00000101
2425#define PXA250_A0 0x00000100
2426
2427#define PXA210_C0 0x00000125
2428#define PXA210_B2 0x00000124
2429#define PXA210_B1 0x00000123
2430#define PXA210_B0 0x00000122
2431#define IXP425_A0 0x000001c1
2432
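/* Worked example of the decode done in probe(): a PXA255 A0 reads a CP15
 * ID register value such as 0x69052d06, so
 *
 *	id & CP15R0_VENDOR_MASK  == 0x69052000	(Intel/ARM/XScale)
 *	id & CP15R0_PRODREV_MASK == 0x00000106	(PXA255_A0)
 *
 * (the exact ID value is illustrative; only the masked fields matter here)
 */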
2433/*
2434 * probe - binds to the platform device
2435 */
2436static int __init pxa2xx_udc_probe(struct device *_dev)
2437{
2438 struct pxa2xx_udc *dev = &memory;
2439 int retval, out_dma = 1;
2440 u32 chiprev;
2441
2442 /* insist on Intel/ARM/XScale */
2443 asm("mrc%? p15, 0, %0, c0, c0" : "=r" (chiprev));
2444 if ((chiprev & CP15R0_VENDOR_MASK) != CP15R0_XSCALE_VALUE) {
2445 printk(KERN_ERR "%s: not XScale!\n", driver_name);
2446 return -ENODEV;
2447 }
2448
2449 /* trigger chiprev-specific logic */
2450 switch (chiprev & CP15R0_PRODREV_MASK) {
2451#if defined(CONFIG_ARCH_PXA)
2452 case PXA255_A0:
2453 dev->has_cfr = 1;
2454 break;
2455 case PXA250_A0:
2456 case PXA250_A1:
2457 /* A0/A1 "not released"; ep 13, 15 unusable */
2458 /* fall through */
2459 case PXA250_B2: case PXA210_B2:
2460 case PXA250_B1: case PXA210_B1:
2461 case PXA250_B0: case PXA210_B0:
2462 out_dma = 0;
2463 /* fall through */
2464 case PXA250_C0: case PXA210_C0:
2465 break;
2466#elif defined(CONFIG_ARCH_IXP4XX)
2467 case IXP425_A0:
2468 out_dma = 0;
2469 break;
2470#endif
2471 default:
2472 out_dma = 0;
2473 printk(KERN_ERR "%s: unrecognized processor: %08x\n",
2474 driver_name, chiprev);
2475 /* iop3xx, ixp4xx, ... */
2476 return -ENODEV;
2477 }
2478
2479 pr_debug("%s: IRQ %d%s%s%s\n", driver_name, IRQ_USB,
2480 dev->has_cfr ? "" : " (!cfr)",
2481 out_dma ? "" : " (broken dma-out)",
2482 SIZE_STR DMASTR
2483 );
2484
2485#ifdef USE_DMA
2486#ifndef USE_OUT_DMA
2487 out_dma = 0;
2488#endif
2489 /* pxa 250 erratum 130 prevents using OUT dma (fixed C0) */
2490 if (!out_dma) {
2491 DMSG("disabled OUT dma\n");
2492 dev->ep[ 2].reg_drcmr = dev->ep[ 4].reg_drcmr = 0;
2493 dev->ep[ 7].reg_drcmr = dev->ep[ 9].reg_drcmr = 0;
2494 dev->ep[12].reg_drcmr = dev->ep[14].reg_drcmr = 0;
2495 }
2496#endif
2497
2498 /* other non-static parts of init */
2499 dev->dev = _dev;
2500 dev->mach = _dev->platform_data;
2501
2502 init_timer(&dev->timer);
2503 dev->timer.function = udc_watchdog;
2504 dev->timer.data = (unsigned long) dev;
2505
2506 device_initialize(&dev->gadget.dev);
2507 dev->gadget.dev.parent = _dev;
2508 dev->gadget.dev.dma_mask = _dev->dma_mask;
2509
2510 the_controller = dev;
2511 dev_set_drvdata(_dev, dev);
2512
2513 udc_disable(dev);
2514 udc_reinit(dev);
2515
2516	dev->vbus = is_vbus_present();
2517
2518 /* irq setup after old hardware state is cleaned up */
2519 retval = request_irq(IRQ_USB, pxa2xx_udc_irq,
2520 SA_INTERRUPT, driver_name, dev);
2521 if (retval != 0) {
2522 printk(KERN_ERR "%s: can't get irq %i, err %d\n",
2523 driver_name, IRQ_USB, retval);
2524 return -EBUSY;
2525 }
2526 dev->got_irq = 1;
2527
2528#ifdef CONFIG_ARCH_LUBBOCK
2529 if (machine_is_lubbock()) {
2530 retval = request_irq(LUBBOCK_USB_DISC_IRQ,
2531 lubbock_vbus_irq,
2532 SA_INTERRUPT | SA_SAMPLE_RANDOM,
2533 driver_name, dev);
2534 if (retval != 0) {
2535 printk(KERN_ERR "%s: can't get irq %i, err %d\n",
2536 driver_name, LUBBOCK_USB_DISC_IRQ, retval);
2537lubbock_fail0:
2538 free_irq(IRQ_USB, dev);
2539 return -EBUSY;
2540 }
2541 retval = request_irq(LUBBOCK_USB_IRQ,
2542 lubbock_vbus_irq,
2543 SA_INTERRUPT | SA_SAMPLE_RANDOM,
2544 driver_name, dev);
2545 if (retval != 0) {
2546 printk(KERN_ERR "%s: can't get irq %i, err %d\n",
2547 driver_name, LUBBOCK_USB_IRQ, retval);
2548 free_irq(LUBBOCK_USB_DISC_IRQ, dev);
2549 goto lubbock_fail0;
2550 }
2551#ifdef DEBUG
2552 /* with U-Boot (but not BLOB), hex is off by default */
2553 HEX_DISPLAY(dev->stats.irqs);
2554 LUB_DISC_BLNK_LED &= 0xff;
2555#endif
2556 }
2557#endif
2558 create_proc_files();
2559
2560 return 0;
2561}
2562
2563static void pxa2xx_udc_shutdown(struct device *_dev)
2564{
2565 pullup_off();
2566}
2567
2568static int __exit pxa2xx_udc_remove(struct device *_dev)
2569{
2570 struct pxa2xx_udc *dev = dev_get_drvdata(_dev);
2571
2572 udc_disable(dev);
2573 remove_proc_files();
2574 usb_gadget_unregister_driver(dev->driver);
2575
2576 if (dev->got_irq) {
2577 free_irq(IRQ_USB, dev);
2578 dev->got_irq = 0;
2579 }
2580 if (machine_is_lubbock()) {
2581 free_irq(LUBBOCK_USB_DISC_IRQ, dev);
2582 free_irq(LUBBOCK_USB_IRQ, dev);
2583 }
2584 dev_set_drvdata(_dev, NULL);
2585 the_controller = NULL;
2586 return 0;
2587}
2588
2589/*-------------------------------------------------------------------------*/
2590
2591#ifdef CONFIG_PM
2592
2593/* USB suspend (controlled by the host) and system suspend (controlled
2594 * by the PXA) don't necessarily work well together. If USB is active,
2595 * the 48 MHz clock is required; so the system can't enter 33 MHz idle
2596 * mode, or any deeper PM saving state.
2597 *
2598 * For now, we punt and forcibly disconnect from the USB host when PXA
2599 * enters any suspend state. While we're disconnected, we always disable
2600 * the 48MHz USB clock ... allowing PXA sleep and/or 33 MHz idle states.
2601 * Boards without software pullup control shouldn't use those states.
2602 * VBUS IRQs should probably be ignored so that the PXA device just acts
2603 * "dead" to USB hosts until system resume.
2604 */
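/* Software pullup control arrives through platform_data as
 * mach->udc_command, using the PXA2XX_UDC_CMD_* codes from <asm/arch/udc.h>.
 * A minimal board-side sketch (the GPIO number is illustrative, and some
 * boards drive the pullup with inverted sense):
 *
 *	static void myboard_udc_command(int cmd)
 *	{
 *		switch (cmd) {
 *		case PXA2XX_UDC_CMD_CONNECT:
 *			GPSR(MYBOARD_GPIO_USB_PULLUP) =
 *				GPIO_bit(MYBOARD_GPIO_USB_PULLUP);
 *			break;
 *		case PXA2XX_UDC_CMD_DISCONNECT:
 *			GPCR(MYBOARD_GPIO_USB_PULLUP) =
 *				GPIO_bit(MYBOARD_GPIO_USB_PULLUP);
 *			break;
 *		}
 *	}
 */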
2605static int pxa2xx_udc_suspend(struct device *dev, u32 state, u32 level)
2606{
2607 struct pxa2xx_udc *udc = dev_get_drvdata(dev);
2608
2609 if (level == SUSPEND_POWER_DOWN) {
2610 if (!udc->mach->udc_command)
2611 WARN("USB host won't detect disconnect!\n");
2612 pullup(udc, 0);
2613 }
2614 return 0;
2615}
2616
2617static int pxa2xx_udc_resume(struct device *dev, u32 level)
2618{
2619 struct pxa2xx_udc *udc = dev_get_drvdata(dev);
2620
2621 if (level == RESUME_POWER_ON)
2622 pullup(udc, 1);
2623 return 0;
2624}
2625
2626#else
2627#define pxa2xx_udc_suspend NULL
2628#define pxa2xx_udc_resume NULL
2629#endif
2630
2631/*-------------------------------------------------------------------------*/
2632
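/* The platform bus matches the driver below purely by name, against a
 * "pxa2xx-udc" platform device registered by arch/board setup code.  On
 * PXA boards that normally reduces to handing over the machine hooks
 * (sketch; the info structure and command hook names are the board's own):
 *
 *	static struct pxa2xx_udc_mach_info myboard_udc_info = {
 *		.udc_command	= myboard_udc_command,
 *	};
 *
 *	... in the board's init code ...
 *	pxa_set_udc_info(&myboard_udc_info);
 */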
2633static struct device_driver udc_driver = {
2634 .name = "pxa2xx-udc",
2635 .bus = &platform_bus_type,
2636 .probe = pxa2xx_udc_probe,
2637	.shutdown	= pxa2xx_udc_shutdown,
2638	.remove		= __exit_p(pxa2xx_udc_remove),
2639 .suspend = pxa2xx_udc_suspend,
2640 .resume = pxa2xx_udc_resume,
2641};
2642
2643static int __init udc_init(void)
2644{
2645 printk(KERN_INFO "%s: version %s\n", driver_name, DRIVER_VERSION);
2646 return driver_register(&udc_driver);
2647}
2648module_init(udc_init);
2649
2650static void __exit udc_exit(void)
2651{
2652 driver_unregister(&udc_driver);
2653}
2654module_exit(udc_exit);
2655
2656MODULE_DESCRIPTION(DRIVER_DESC);
2657MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell");
2658MODULE_LICENSE("GPL");
2659