blob: 02bbf8938bb9ea02d2056f2d0d2f30d563fa429d [file] [log] [blame]
David Mosberger2d531392014-04-28 22:14:07 -06001/*
2 * MAX3421 Host Controller driver for USB.
3 *
4 * Author: David Mosberger-Tang <davidm@egauge.net>
5 *
6 * (C) Copyright 2014 David Mosberger-Tang <davidm@egauge.net>
7 *
8 * MAX3421 is a chip implementing a USB 2.0 Full-/Low-Speed host
9 * controller on a SPI bus.
10 *
11 * Based on:
12 * o MAX3421E datasheet
13 * http://datasheets.maximintegrated.com/en/ds/MAX3421E.pdf
14 * o MAX3421E Programming Guide
15 * http://www.hdl.co.jp/ftpdata/utl-001/AN3785.pdf
16 * o gadget/dummy_hcd.c
17 * For USB HCD implementation.
18 * o Arduino MAX3421 driver
19 * https://github.com/felis/USB_Host_Shield_2.0/blob/master/Usb.cpp
20 *
21 * This file is licenced under the GPL v2.
22 *
23 * Important note on worst-case (full-speed) packet size constraints
24 * (See USB 2.0 Section 5.6.3 and following):
25 *
26 * - control: 64 bytes
27 * - isochronous: 1023 bytes
28 * - interrupt: 64 bytes
29 * - bulk: 64 bytes
30 *
31 * Since the MAX3421 FIFO size is 64 bytes, we do not have to work about
32 * multi-FIFO writes/reads for a single USB packet *except* for isochronous
33 * transfers. We don't support isochronous transfers at this time, so we
34 * just assume that a USB packet always fits into a single FIFO buffer.
35 *
36 * NOTE: The June 2006 version of "MAX3421E Programming Guide"
37 * (AN3785) has conflicting info for the RCVDAVIRQ bit:
38 *
39 * The description of RCVDAVIRQ says "The CPU *must* clear
40 * this IRQ bit (by writing a 1 to it) before reading the
41 * RCVFIFO data.
42 *
43 * However, the earlier section on "Programming BULK-IN
44 * Transfers" says * that:
45 *
46 * After the CPU retrieves the data, it clears the
47 * RCVDAVIRQ bit.
48 *
49 * The December 2006 version has been corrected and it consistently
50 * states the second behavior is the correct one.
51 *
52 * Synchronous SPI transactions sleep so we can't perform any such
53 * transactions while holding a spin-lock (and/or while interrupts are
54 * masked). To achieve this, all SPI transactions are issued from a
55 * single thread (max3421_spi_thread).
56 */
57
Asaf Vertz788bfe82014-12-15 09:22:07 +020058#include <linux/jiffies.h>
David Mosberger2d531392014-04-28 22:14:07 -060059#include <linux/module.h>
60#include <linux/spi/spi.h>
61#include <linux/usb.h>
62#include <linux/usb/hcd.h>
Jules Maselbas721fdc82017-09-15 18:58:45 +020063#include <linux/of.h>
David Mosberger2d531392014-04-28 22:14:07 -060064
65#include <linux/platform_data/max3421-hcd.h>
66
67#define DRIVER_DESC "MAX3421 USB Host-Controller Driver"
68#define DRIVER_VERSION "1.0"
69
70/* 11-bit counter that wraps around (USB 2.0 Section 8.3.3): */
71#define USB_MAX_FRAME_NUMBER 0x7ff
72#define USB_MAX_RETRIES 3 /* # of retries before error is reported */
73
74/*
75 * Max. # of times we're willing to retransmit a request immediately in
76 * resposne to a NAK. Afterwards, we fall back on trying once a frame.
77 */
78#define NAK_MAX_FAST_RETRANSMITS 2
79
80#define POWER_BUDGET 500 /* in mA; use 8 for low-power port testing */
81
82/* Port-change mask: */
83#define PORT_C_MASK ((USB_PORT_STAT_C_CONNECTION | \
84 USB_PORT_STAT_C_ENABLE | \
85 USB_PORT_STAT_C_SUSPEND | \
86 USB_PORT_STAT_C_OVERCURRENT | \
87 USB_PORT_STAT_C_RESET) << 16)
88
Jules Maselbas721fdc82017-09-15 18:58:45 +020089#define MAX3421_GPOUT_COUNT 8
90
David Mosberger2d531392014-04-28 22:14:07 -060091enum max3421_rh_state {
92 MAX3421_RH_RESET,
93 MAX3421_RH_SUSPENDED,
94 MAX3421_RH_RUNNING
95};
96
97enum pkt_state {
98 PKT_STATE_SETUP, /* waiting to send setup packet to ctrl pipe */
99 PKT_STATE_TRANSFER, /* waiting to xfer transfer_buffer */
100 PKT_STATE_TERMINATE /* waiting to terminate control transfer */
101};
102
103enum scheduling_pass {
104 SCHED_PASS_PERIODIC,
105 SCHED_PASS_NON_PERIODIC,
106 SCHED_PASS_DONE
107};
108
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -0600109/* Bit numbers for max3421_hcd->todo: */
110enum {
111 ENABLE_IRQ = 0,
112 RESET_HCD,
113 RESET_PORT,
114 CHECK_UNLINK,
115 IOPIN_UPDATE
116};
117
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600118struct max3421_dma_buf {
119 u8 data[2];
120};
121
David Mosberger2d531392014-04-28 22:14:07 -0600122struct max3421_hcd {
123 spinlock_t lock;
124
125 struct task_struct *spi_thread;
126
127 struct max3421_hcd *next;
128
129 enum max3421_rh_state rh_state;
130 /* lower 16 bits contain port status, upper 16 bits the change mask: */
131 u32 port_status;
132
133 unsigned active:1;
134
135 struct list_head ep_list; /* list of EP's with work */
136
137 /*
138 * The following are owned by spi_thread (may be accessed by
139 * SPI-thread without acquiring the HCD lock:
140 */
141 u8 rev; /* chip revision */
142 u16 frame_number;
143 /*
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600144 * kmalloc'd buffers guaranteed to be in separate (DMA)
145 * cache-lines:
146 */
147 struct max3421_dma_buf *tx;
148 struct max3421_dma_buf *rx;
149 /*
David Mosberger2d531392014-04-28 22:14:07 -0600150 * URB we're currently processing. Must not be reset to NULL
151 * unless MAX3421E chip is idle:
152 */
153 struct urb *curr_urb;
154 enum scheduling_pass sched_pass;
155 struct usb_device *loaded_dev; /* dev that's loaded into the chip */
156 int loaded_epnum; /* epnum whose toggles are loaded */
157 int urb_done; /* > 0 -> no errors, < 0: errno */
158 size_t curr_len;
159 u8 hien;
160 u8 mode;
161 u8 iopins[2];
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -0600162 unsigned long todo;
David Mosberger2d531392014-04-28 22:14:07 -0600163#ifdef DEBUG
164 unsigned long err_stat[16];
165#endif
166};
167
168struct max3421_ep {
169 struct usb_host_endpoint *ep;
170 struct list_head ep_list;
171 u32 naks;
172 u16 last_active; /* frame # this ep was last active */
173 enum pkt_state pkt_state;
174 u8 retries;
175 u8 retransmit; /* packet needs retransmission */
176};
177
178static struct max3421_hcd *max3421_hcd_list;
179
180#define MAX3421_FIFO_SIZE 64
181
182#define MAX3421_SPI_DIR_RD 0 /* read register from MAX3421 */
183#define MAX3421_SPI_DIR_WR 1 /* write register to MAX3421 */
184
185/* SPI commands: */
186#define MAX3421_SPI_DIR_SHIFT 1
187#define MAX3421_SPI_REG_SHIFT 3
188
189#define MAX3421_REG_RCVFIFO 1
190#define MAX3421_REG_SNDFIFO 2
191#define MAX3421_REG_SUDFIFO 4
192#define MAX3421_REG_RCVBC 6
193#define MAX3421_REG_SNDBC 7
194#define MAX3421_REG_USBIRQ 13
195#define MAX3421_REG_USBIEN 14
196#define MAX3421_REG_USBCTL 15
197#define MAX3421_REG_CPUCTL 16
198#define MAX3421_REG_PINCTL 17
199#define MAX3421_REG_REVISION 18
200#define MAX3421_REG_IOPINS1 20
201#define MAX3421_REG_IOPINS2 21
202#define MAX3421_REG_GPINIRQ 22
203#define MAX3421_REG_GPINIEN 23
204#define MAX3421_REG_GPINPOL 24
205#define MAX3421_REG_HIRQ 25
206#define MAX3421_REG_HIEN 26
207#define MAX3421_REG_MODE 27
208#define MAX3421_REG_PERADDR 28
209#define MAX3421_REG_HCTL 29
210#define MAX3421_REG_HXFR 30
211#define MAX3421_REG_HRSL 31
212
213enum {
214 MAX3421_USBIRQ_OSCOKIRQ_BIT = 0,
215 MAX3421_USBIRQ_NOVBUSIRQ_BIT = 5,
216 MAX3421_USBIRQ_VBUSIRQ_BIT
217};
218
219enum {
220 MAX3421_CPUCTL_IE_BIT = 0,
221 MAX3421_CPUCTL_PULSEWID0_BIT = 6,
222 MAX3421_CPUCTL_PULSEWID1_BIT
223};
224
225enum {
226 MAX3421_USBCTL_PWRDOWN_BIT = 4,
227 MAX3421_USBCTL_CHIPRES_BIT
228};
229
230enum {
231 MAX3421_PINCTL_GPXA_BIT = 0,
232 MAX3421_PINCTL_GPXB_BIT,
233 MAX3421_PINCTL_POSINT_BIT,
234 MAX3421_PINCTL_INTLEVEL_BIT,
235 MAX3421_PINCTL_FDUPSPI_BIT,
236 MAX3421_PINCTL_EP0INAK_BIT,
237 MAX3421_PINCTL_EP2INAK_BIT,
238 MAX3421_PINCTL_EP3INAK_BIT,
239};
240
241enum {
242 MAX3421_HI_BUSEVENT_BIT = 0, /* bus-reset/-resume */
243 MAX3421_HI_RWU_BIT, /* remote wakeup */
244 MAX3421_HI_RCVDAV_BIT, /* receive FIFO data available */
245 MAX3421_HI_SNDBAV_BIT, /* send buffer available */
246 MAX3421_HI_SUSDN_BIT, /* suspend operation done */
247 MAX3421_HI_CONDET_BIT, /* peripheral connect/disconnect */
248 MAX3421_HI_FRAME_BIT, /* frame generator */
249 MAX3421_HI_HXFRDN_BIT, /* host transfer done */
250};
251
252enum {
253 MAX3421_HCTL_BUSRST_BIT = 0,
254 MAX3421_HCTL_FRMRST_BIT,
255 MAX3421_HCTL_SAMPLEBUS_BIT,
256 MAX3421_HCTL_SIGRSM_BIT,
257 MAX3421_HCTL_RCVTOG0_BIT,
258 MAX3421_HCTL_RCVTOG1_BIT,
259 MAX3421_HCTL_SNDTOG0_BIT,
260 MAX3421_HCTL_SNDTOG1_BIT
261};
262
263enum {
264 MAX3421_MODE_HOST_BIT = 0,
265 MAX3421_MODE_LOWSPEED_BIT,
266 MAX3421_MODE_HUBPRE_BIT,
267 MAX3421_MODE_SOFKAENAB_BIT,
268 MAX3421_MODE_SEPIRQ_BIT,
269 MAX3421_MODE_DELAYISO_BIT,
270 MAX3421_MODE_DMPULLDN_BIT,
271 MAX3421_MODE_DPPULLDN_BIT
272};
273
274enum {
275 MAX3421_HRSL_OK = 0,
276 MAX3421_HRSL_BUSY,
277 MAX3421_HRSL_BADREQ,
278 MAX3421_HRSL_UNDEF,
279 MAX3421_HRSL_NAK,
280 MAX3421_HRSL_STALL,
281 MAX3421_HRSL_TOGERR,
282 MAX3421_HRSL_WRONGPID,
283 MAX3421_HRSL_BADBC,
284 MAX3421_HRSL_PIDERR,
285 MAX3421_HRSL_PKTERR,
286 MAX3421_HRSL_CRCERR,
287 MAX3421_HRSL_KERR,
288 MAX3421_HRSL_JERR,
289 MAX3421_HRSL_TIMEOUT,
290 MAX3421_HRSL_BABBLE,
291 MAX3421_HRSL_RESULT_MASK = 0xf,
292 MAX3421_HRSL_RCVTOGRD_BIT = 4,
293 MAX3421_HRSL_SNDTOGRD_BIT,
294 MAX3421_HRSL_KSTATUS_BIT,
295 MAX3421_HRSL_JSTATUS_BIT
296};
297
298/* Return same error-codes as ohci.h:cc_to_error: */
299static const int hrsl_to_error[] = {
300 [MAX3421_HRSL_OK] = 0,
301 [MAX3421_HRSL_BUSY] = -EINVAL,
302 [MAX3421_HRSL_BADREQ] = -EINVAL,
303 [MAX3421_HRSL_UNDEF] = -EINVAL,
304 [MAX3421_HRSL_NAK] = -EAGAIN,
305 [MAX3421_HRSL_STALL] = -EPIPE,
306 [MAX3421_HRSL_TOGERR] = -EILSEQ,
307 [MAX3421_HRSL_WRONGPID] = -EPROTO,
308 [MAX3421_HRSL_BADBC] = -EREMOTEIO,
309 [MAX3421_HRSL_PIDERR] = -EPROTO,
310 [MAX3421_HRSL_PKTERR] = -EPROTO,
311 [MAX3421_HRSL_CRCERR] = -EILSEQ,
312 [MAX3421_HRSL_KERR] = -EIO,
313 [MAX3421_HRSL_JERR] = -EIO,
314 [MAX3421_HRSL_TIMEOUT] = -ETIME,
315 [MAX3421_HRSL_BABBLE] = -EOVERFLOW
316};
317
318/*
319 * See http://www.beyondlogic.org/usbnutshell/usb4.shtml#Control for a
320 * reasonable overview of how control transfers use the the IN/OUT
321 * tokens.
322 */
323#define MAX3421_HXFR_BULK_IN(ep) (0x00 | (ep)) /* bulk or interrupt */
324#define MAX3421_HXFR_SETUP 0x10
325#define MAX3421_HXFR_BULK_OUT(ep) (0x20 | (ep)) /* bulk or interrupt */
326#define MAX3421_HXFR_ISO_IN(ep) (0x40 | (ep))
327#define MAX3421_HXFR_ISO_OUT(ep) (0x60 | (ep))
328#define MAX3421_HXFR_HS_IN 0x80 /* handshake in */
329#define MAX3421_HXFR_HS_OUT 0xa0 /* handshake out */
330
331#define field(val, bit) ((val) << (bit))
332
333static inline s16
334frame_diff(u16 left, u16 right)
335{
336 return ((unsigned) (left - right)) % (USB_MAX_FRAME_NUMBER + 1);
337}
338
339static inline struct max3421_hcd *
340hcd_to_max3421(struct usb_hcd *hcd)
341{
342 return (struct max3421_hcd *) hcd->hcd_priv;
343}
344
345static inline struct usb_hcd *
346max3421_to_hcd(struct max3421_hcd *max3421_hcd)
347{
348 return container_of((void *) max3421_hcd, struct usb_hcd, hcd_priv);
349}
350
351static u8
352spi_rd8(struct usb_hcd *hcd, unsigned int reg)
353{
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600354 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
David Mosberger2d531392014-04-28 22:14:07 -0600355 struct spi_device *spi = to_spi_device(hcd->self.controller);
356 struct spi_transfer transfer;
David Mosberger2d531392014-04-28 22:14:07 -0600357 struct spi_message msg;
358
359 memset(&transfer, 0, sizeof(transfer));
360
361 spi_message_init(&msg);
362
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600363 max3421_hcd->tx->data[0] =
364 (field(reg, MAX3421_SPI_REG_SHIFT) |
365 field(MAX3421_SPI_DIR_RD, MAX3421_SPI_DIR_SHIFT));
David Mosberger2d531392014-04-28 22:14:07 -0600366
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600367 transfer.tx_buf = max3421_hcd->tx->data;
368 transfer.rx_buf = max3421_hcd->rx->data;
David Mosberger2d531392014-04-28 22:14:07 -0600369 transfer.len = 2;
370
371 spi_message_add_tail(&transfer, &msg);
372 spi_sync(spi, &msg);
373
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600374 return max3421_hcd->rx->data[1];
David Mosberger2d531392014-04-28 22:14:07 -0600375}
376
377static void
378spi_wr8(struct usb_hcd *hcd, unsigned int reg, u8 val)
379{
380 struct spi_device *spi = to_spi_device(hcd->self.controller);
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600381 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
David Mosberger2d531392014-04-28 22:14:07 -0600382 struct spi_transfer transfer;
383 struct spi_message msg;
David Mosberger2d531392014-04-28 22:14:07 -0600384
385 memset(&transfer, 0, sizeof(transfer));
386
387 spi_message_init(&msg);
388
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600389 max3421_hcd->tx->data[0] =
390 (field(reg, MAX3421_SPI_REG_SHIFT) |
391 field(MAX3421_SPI_DIR_WR, MAX3421_SPI_DIR_SHIFT));
392 max3421_hcd->tx->data[1] = val;
David Mosberger2d531392014-04-28 22:14:07 -0600393
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600394 transfer.tx_buf = max3421_hcd->tx->data;
David Mosberger2d531392014-04-28 22:14:07 -0600395 transfer.len = 2;
396
397 spi_message_add_tail(&transfer, &msg);
398 spi_sync(spi, &msg);
399}
400
401static void
402spi_rd_buf(struct usb_hcd *hcd, unsigned int reg, void *buf, size_t len)
403{
404 struct spi_device *spi = to_spi_device(hcd->self.controller);
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600405 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
David Mosberger2d531392014-04-28 22:14:07 -0600406 struct spi_transfer transfer[2];
407 struct spi_message msg;
David Mosberger2d531392014-04-28 22:14:07 -0600408
409 memset(transfer, 0, sizeof(transfer));
410
411 spi_message_init(&msg);
412
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600413 max3421_hcd->tx->data[0] =
414 (field(reg, MAX3421_SPI_REG_SHIFT) |
415 field(MAX3421_SPI_DIR_RD, MAX3421_SPI_DIR_SHIFT));
416 transfer[0].tx_buf = max3421_hcd->tx->data;
David Mosberger2d531392014-04-28 22:14:07 -0600417 transfer[0].len = 1;
418
419 transfer[1].rx_buf = buf;
420 transfer[1].len = len;
421
422 spi_message_add_tail(&transfer[0], &msg);
423 spi_message_add_tail(&transfer[1], &msg);
424 spi_sync(spi, &msg);
425}
426
427static void
428spi_wr_buf(struct usb_hcd *hcd, unsigned int reg, void *buf, size_t len)
429{
430 struct spi_device *spi = to_spi_device(hcd->self.controller);
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600431 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
David Mosberger2d531392014-04-28 22:14:07 -0600432 struct spi_transfer transfer[2];
433 struct spi_message msg;
David Mosberger2d531392014-04-28 22:14:07 -0600434
435 memset(transfer, 0, sizeof(transfer));
436
437 spi_message_init(&msg);
438
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600439 max3421_hcd->tx->data[0] =
440 (field(reg, MAX3421_SPI_REG_SHIFT) |
441 field(MAX3421_SPI_DIR_WR, MAX3421_SPI_DIR_SHIFT));
David Mosberger2d531392014-04-28 22:14:07 -0600442
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600443 transfer[0].tx_buf = max3421_hcd->tx->data;
David Mosberger2d531392014-04-28 22:14:07 -0600444 transfer[0].len = 1;
445
446 transfer[1].tx_buf = buf;
447 transfer[1].len = len;
448
449 spi_message_add_tail(&transfer[0], &msg);
450 spi_message_add_tail(&transfer[1], &msg);
451 spi_sync(spi, &msg);
452}
453
454/*
455 * Figure out the correct setting for the LOWSPEED and HUBPRE mode
456 * bits. The HUBPRE bit needs to be set when MAX3421E operates at
457 * full speed, but it's talking to a low-speed device (i.e., through a
458 * hub). Setting that bit ensures that every low-speed packet is
459 * preceded by a full-speed PRE PID. Possible configurations:
460 *
461 * Hub speed: Device speed: => LOWSPEED bit: HUBPRE bit:
462 * FULL FULL => 0 0
463 * FULL LOW => 1 1
464 * LOW LOW => 1 0
465 * LOW FULL => 1 0
466 */
467static void
468max3421_set_speed(struct usb_hcd *hcd, struct usb_device *dev)
469{
470 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
471 u8 mode_lowspeed, mode_hubpre, mode = max3421_hcd->mode;
472
473 mode_lowspeed = BIT(MAX3421_MODE_LOWSPEED_BIT);
474 mode_hubpre = BIT(MAX3421_MODE_HUBPRE_BIT);
475 if (max3421_hcd->port_status & USB_PORT_STAT_LOW_SPEED) {
476 mode |= mode_lowspeed;
477 mode &= ~mode_hubpre;
478 } else if (dev->speed == USB_SPEED_LOW) {
479 mode |= mode_lowspeed | mode_hubpre;
480 } else {
481 mode &= ~(mode_lowspeed | mode_hubpre);
482 }
483 if (mode != max3421_hcd->mode) {
484 max3421_hcd->mode = mode;
485 spi_wr8(hcd, MAX3421_REG_MODE, max3421_hcd->mode);
486 }
487
488}
489
490/*
491 * Caller must NOT hold HCD spinlock.
492 */
493static void
494max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum,
495 int force_toggles)
496{
497 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
498 int old_epnum, same_ep, rcvtog, sndtog;
499 struct usb_device *old_dev;
500 u8 hctl;
501
502 old_dev = max3421_hcd->loaded_dev;
503 old_epnum = max3421_hcd->loaded_epnum;
504
505 same_ep = (dev == old_dev && epnum == old_epnum);
506 if (same_ep && !force_toggles)
507 return;
508
509 if (old_dev && !same_ep) {
510 /* save the old end-points toggles: */
511 u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
512
513 rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1;
514 sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
515
516 /* no locking: HCD (i.e., we) own toggles, don't we? */
517 usb_settoggle(old_dev, old_epnum, 0, rcvtog);
518 usb_settoggle(old_dev, old_epnum, 1, sndtog);
519 }
520 /* setup new endpoint's toggle bits: */
521 rcvtog = usb_gettoggle(dev, epnum, 0);
522 sndtog = usb_gettoggle(dev, epnum, 1);
523 hctl = (BIT(rcvtog + MAX3421_HCTL_RCVTOG0_BIT) |
524 BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT));
525
526 max3421_hcd->loaded_epnum = epnum;
527 spi_wr8(hcd, MAX3421_REG_HCTL, hctl);
528
529 /*
530 * Note: devnum for one and the same device can change during
531 * address-assignment so it's best to just always load the
532 * address whenever the end-point changed/was forced.
533 */
534 max3421_hcd->loaded_dev = dev;
535 spi_wr8(hcd, MAX3421_REG_PERADDR, dev->devnum);
536}
537
538static int
539max3421_ctrl_setup(struct usb_hcd *hcd, struct urb *urb)
540{
541 spi_wr_buf(hcd, MAX3421_REG_SUDFIFO, urb->setup_packet, 8);
542 return MAX3421_HXFR_SETUP;
543}
544
545static int
546max3421_transfer_in(struct usb_hcd *hcd, struct urb *urb)
547{
548 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
549 int epnum = usb_pipeendpoint(urb->pipe);
550
551 max3421_hcd->curr_len = 0;
552 max3421_hcd->hien |= BIT(MAX3421_HI_RCVDAV_BIT);
553 return MAX3421_HXFR_BULK_IN(epnum);
554}
555
556static int
557max3421_transfer_out(struct usb_hcd *hcd, struct urb *urb, int fast_retransmit)
558{
559 struct spi_device *spi = to_spi_device(hcd->self.controller);
560 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
561 int epnum = usb_pipeendpoint(urb->pipe);
562 u32 max_packet;
563 void *src;
564
565 src = urb->transfer_buffer + urb->actual_length;
566
567 if (fast_retransmit) {
568 if (max3421_hcd->rev == 0x12) {
569 /* work around rev 0x12 bug: */
570 spi_wr8(hcd, MAX3421_REG_SNDBC, 0);
571 spi_wr8(hcd, MAX3421_REG_SNDFIFO, ((u8 *) src)[0]);
572 spi_wr8(hcd, MAX3421_REG_SNDBC, max3421_hcd->curr_len);
573 }
574 return MAX3421_HXFR_BULK_OUT(epnum);
575 }
576
577 max_packet = usb_maxpacket(urb->dev, urb->pipe, 1);
578
579 if (max_packet > MAX3421_FIFO_SIZE) {
580 /*
581 * We do not support isochronous transfers at this
582 * time.
583 */
584 dev_err(&spi->dev,
585 "%s: packet-size of %u too big (limit is %u bytes)",
586 __func__, max_packet, MAX3421_FIFO_SIZE);
587 max3421_hcd->urb_done = -EMSGSIZE;
588 return -EMSGSIZE;
589 }
590 max3421_hcd->curr_len = min((urb->transfer_buffer_length -
591 urb->actual_length), max_packet);
592
593 spi_wr_buf(hcd, MAX3421_REG_SNDFIFO, src, max3421_hcd->curr_len);
594 spi_wr8(hcd, MAX3421_REG_SNDBC, max3421_hcd->curr_len);
595 return MAX3421_HXFR_BULK_OUT(epnum);
596}
597
598/*
599 * Issue the next host-transfer command.
600 * Caller must NOT hold HCD spinlock.
601 */
602static void
603max3421_next_transfer(struct usb_hcd *hcd, int fast_retransmit)
604{
605 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
606 struct urb *urb = max3421_hcd->curr_urb;
David Mosberger-Tangf9da25c2014-05-28 10:06:24 -0600607 struct max3421_ep *max3421_ep;
David Mosberger2d531392014-04-28 22:14:07 -0600608 int cmd = -EINVAL;
609
610 if (!urb)
611 return; /* nothing to do */
612
David Mosberger-Tangf9da25c2014-05-28 10:06:24 -0600613 max3421_ep = urb->ep->hcpriv;
614
David Mosberger2d531392014-04-28 22:14:07 -0600615 switch (max3421_ep->pkt_state) {
616 case PKT_STATE_SETUP:
617 cmd = max3421_ctrl_setup(hcd, urb);
618 break;
619
620 case PKT_STATE_TRANSFER:
621 if (usb_urb_dir_in(urb))
622 cmd = max3421_transfer_in(hcd, urb);
623 else
624 cmd = max3421_transfer_out(hcd, urb, fast_retransmit);
625 break;
626
627 case PKT_STATE_TERMINATE:
628 /*
629 * IN transfers are terminated with HS_OUT token,
630 * OUT transfers with HS_IN:
631 */
632 if (usb_urb_dir_in(urb))
633 cmd = MAX3421_HXFR_HS_OUT;
634 else
635 cmd = MAX3421_HXFR_HS_IN;
636 break;
637 }
638
639 if (cmd < 0)
640 return;
641
642 /* issue the command and wait for host-xfer-done interrupt: */
643
644 spi_wr8(hcd, MAX3421_REG_HXFR, cmd);
645 max3421_hcd->hien |= BIT(MAX3421_HI_HXFRDN_BIT);
646}
647
648/*
649 * Find the next URB to process and start its execution.
650 *
651 * At this time, we do not anticipate ever connecting a USB hub to the
652 * MAX3421 chip, so at most USB device can be connected and we can use
653 * a simplistic scheduler: at the start of a frame, schedule all
654 * periodic transfers. Once that is done, use the remainder of the
655 * frame to process non-periodic (bulk & control) transfers.
656 *
657 * Preconditions:
658 * o Caller must NOT hold HCD spinlock.
659 * o max3421_hcd->curr_urb MUST BE NULL.
660 * o MAX3421E chip must be idle.
661 */
662static int
663max3421_select_and_start_urb(struct usb_hcd *hcd)
664{
665 struct spi_device *spi = to_spi_device(hcd->self.controller);
666 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
667 struct urb *urb, *curr_urb = NULL;
668 struct max3421_ep *max3421_ep;
669 int epnum, force_toggles = 0;
670 struct usb_host_endpoint *ep;
671 struct list_head *pos;
672 unsigned long flags;
673
674 spin_lock_irqsave(&max3421_hcd->lock, flags);
675
676 for (;
677 max3421_hcd->sched_pass < SCHED_PASS_DONE;
678 ++max3421_hcd->sched_pass)
679 list_for_each(pos, &max3421_hcd->ep_list) {
680 urb = NULL;
681 max3421_ep = container_of(pos, struct max3421_ep,
682 ep_list);
683 ep = max3421_ep->ep;
684
685 switch (usb_endpoint_type(&ep->desc)) {
686 case USB_ENDPOINT_XFER_ISOC:
687 case USB_ENDPOINT_XFER_INT:
688 if (max3421_hcd->sched_pass !=
689 SCHED_PASS_PERIODIC)
690 continue;
691 break;
692
693 case USB_ENDPOINT_XFER_CONTROL:
694 case USB_ENDPOINT_XFER_BULK:
695 if (max3421_hcd->sched_pass !=
696 SCHED_PASS_NON_PERIODIC)
697 continue;
698 break;
699 }
700
701 if (list_empty(&ep->urb_list))
702 continue; /* nothing to do */
703 urb = list_first_entry(&ep->urb_list, struct urb,
704 urb_list);
705 if (urb->unlinked) {
706 dev_dbg(&spi->dev, "%s: URB %p unlinked=%d",
707 __func__, urb, urb->unlinked);
708 max3421_hcd->curr_urb = urb;
709 max3421_hcd->urb_done = 1;
710 spin_unlock_irqrestore(&max3421_hcd->lock,
711 flags);
712 return 1;
713 }
714
715 switch (usb_endpoint_type(&ep->desc)) {
716 case USB_ENDPOINT_XFER_CONTROL:
717 /*
718 * Allow one control transaction per
719 * frame per endpoint:
720 */
721 if (frame_diff(max3421_ep->last_active,
722 max3421_hcd->frame_number) == 0)
723 continue;
724 break;
725
726 case USB_ENDPOINT_XFER_BULK:
727 if (max3421_ep->retransmit
728 && (frame_diff(max3421_ep->last_active,
729 max3421_hcd->frame_number)
730 == 0))
731 /*
732 * We already tried this EP
733 * during this frame and got a
734 * NAK or error; wait for next frame
735 */
736 continue;
737 break;
738
739 case USB_ENDPOINT_XFER_ISOC:
740 case USB_ENDPOINT_XFER_INT:
741 if (frame_diff(max3421_hcd->frame_number,
742 max3421_ep->last_active)
743 < urb->interval)
744 /*
745 * We already processed this
746 * end-point in the current
747 * frame
748 */
749 continue;
750 break;
751 }
752
753 /* move current ep to tail: */
754 list_move_tail(pos, &max3421_hcd->ep_list);
755 curr_urb = urb;
756 goto done;
757 }
758done:
759 if (!curr_urb) {
760 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
761 return 0;
762 }
763
764 urb = max3421_hcd->curr_urb = curr_urb;
765 epnum = usb_endpoint_num(&urb->ep->desc);
766 if (max3421_ep->retransmit)
767 /* restart (part of) a USB transaction: */
768 max3421_ep->retransmit = 0;
769 else {
770 /* start USB transaction: */
771 if (usb_endpoint_xfer_control(&ep->desc)) {
772 /*
773 * See USB 2.0 spec section 8.6.1
774 * Initialization via SETUP Token:
775 */
776 usb_settoggle(urb->dev, epnum, 0, 1);
777 usb_settoggle(urb->dev, epnum, 1, 1);
778 max3421_ep->pkt_state = PKT_STATE_SETUP;
779 force_toggles = 1;
780 } else
781 max3421_ep->pkt_state = PKT_STATE_TRANSFER;
782 }
783
784 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
785
786 max3421_ep->last_active = max3421_hcd->frame_number;
787 max3421_set_address(hcd, urb->dev, epnum, force_toggles);
788 max3421_set_speed(hcd, urb->dev);
789 max3421_next_transfer(hcd, 0);
790 return 1;
791}
792
793/*
794 * Check all endpoints for URBs that got unlinked.
795 *
796 * Caller must NOT hold HCD spinlock.
797 */
798static int
799max3421_check_unlink(struct usb_hcd *hcd)
800{
801 struct spi_device *spi = to_spi_device(hcd->self.controller);
802 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
David Mosberger2d531392014-04-28 22:14:07 -0600803 struct max3421_ep *max3421_ep;
804 struct usb_host_endpoint *ep;
Geliang Tang553c2362015-12-20 00:11:50 +0800805 struct urb *urb, *next;
David Mosberger2d531392014-04-28 22:14:07 -0600806 unsigned long flags;
807 int retval = 0;
808
809 spin_lock_irqsave(&max3421_hcd->lock, flags);
Geliang Tang553c2362015-12-20 00:11:50 +0800810 list_for_each_entry(max3421_ep, &max3421_hcd->ep_list, ep_list) {
David Mosberger2d531392014-04-28 22:14:07 -0600811 ep = max3421_ep->ep;
Geliang Tang553c2362015-12-20 00:11:50 +0800812 list_for_each_entry_safe(urb, next, &ep->urb_list, urb_list) {
David Mosberger2d531392014-04-28 22:14:07 -0600813 if (urb->unlinked) {
814 retval = 1;
815 dev_dbg(&spi->dev, "%s: URB %p unlinked=%d",
816 __func__, urb, urb->unlinked);
817 usb_hcd_unlink_urb_from_ep(hcd, urb);
818 spin_unlock_irqrestore(&max3421_hcd->lock,
819 flags);
820 usb_hcd_giveback_urb(hcd, urb, 0);
821 spin_lock_irqsave(&max3421_hcd->lock, flags);
822 }
823 }
824 }
825 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
826 return retval;
827}
828
829/*
830 * Caller must NOT hold HCD spinlock.
831 */
832static void
833max3421_slow_retransmit(struct usb_hcd *hcd)
834{
835 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
836 struct urb *urb = max3421_hcd->curr_urb;
837 struct max3421_ep *max3421_ep;
838
839 max3421_ep = urb->ep->hcpriv;
840 max3421_ep->retransmit = 1;
841 max3421_hcd->curr_urb = NULL;
842}
843
844/*
845 * Caller must NOT hold HCD spinlock.
846 */
847static void
848max3421_recv_data_available(struct usb_hcd *hcd)
849{
850 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
851 struct urb *urb = max3421_hcd->curr_urb;
852 size_t remaining, transfer_size;
853 u8 rcvbc;
854
855 rcvbc = spi_rd8(hcd, MAX3421_REG_RCVBC);
856
857 if (rcvbc > MAX3421_FIFO_SIZE)
858 rcvbc = MAX3421_FIFO_SIZE;
859 if (urb->actual_length >= urb->transfer_buffer_length)
860 remaining = 0;
861 else
862 remaining = urb->transfer_buffer_length - urb->actual_length;
863 transfer_size = rcvbc;
864 if (transfer_size > remaining)
865 transfer_size = remaining;
866 if (transfer_size > 0) {
867 void *dst = urb->transfer_buffer + urb->actual_length;
868
869 spi_rd_buf(hcd, MAX3421_REG_RCVFIFO, dst, transfer_size);
870 urb->actual_length += transfer_size;
871 max3421_hcd->curr_len = transfer_size;
872 }
873
874 /* ack the RCVDAV irq now that the FIFO has been read: */
875 spi_wr8(hcd, MAX3421_REG_HIRQ, BIT(MAX3421_HI_RCVDAV_BIT));
876}
877
878static void
879max3421_handle_error(struct usb_hcd *hcd, u8 hrsl)
880{
881 struct spi_device *spi = to_spi_device(hcd->self.controller);
882 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
883 u8 result_code = hrsl & MAX3421_HRSL_RESULT_MASK;
884 struct urb *urb = max3421_hcd->curr_urb;
885 struct max3421_ep *max3421_ep = urb->ep->hcpriv;
886 int switch_sndfifo;
887
888 /*
889 * If an OUT command results in any response other than OK
890 * (i.e., error or NAK), we have to perform a dummy-write to
891 * SNDBC so the FIFO gets switched back to us. Otherwise, we
892 * get out of sync with the SNDFIFO double buffer.
893 */
894 switch_sndfifo = (max3421_ep->pkt_state == PKT_STATE_TRANSFER &&
895 usb_urb_dir_out(urb));
896
897 switch (result_code) {
898 case MAX3421_HRSL_OK:
899 return; /* this shouldn't happen */
900
901 case MAX3421_HRSL_WRONGPID: /* received wrong PID */
902 case MAX3421_HRSL_BUSY: /* SIE busy */
903 case MAX3421_HRSL_BADREQ: /* bad val in HXFR */
904 case MAX3421_HRSL_UNDEF: /* reserved */
905 case MAX3421_HRSL_KERR: /* K-state instead of response */
906 case MAX3421_HRSL_JERR: /* J-state instead of response */
907 /*
908 * packet experienced an error that we cannot recover
909 * from; report error
910 */
911 max3421_hcd->urb_done = hrsl_to_error[result_code];
912 dev_dbg(&spi->dev, "%s: unexpected error HRSL=0x%02x",
913 __func__, hrsl);
914 break;
915
916 case MAX3421_HRSL_TOGERR:
917 if (usb_urb_dir_in(urb))
918 ; /* don't do anything (device will switch toggle) */
919 else {
920 /* flip the send toggle bit: */
921 int sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
922
923 sndtog ^= 1;
924 spi_wr8(hcd, MAX3421_REG_HCTL,
925 BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT));
926 }
927 /* FALL THROUGH */
928 case MAX3421_HRSL_BADBC: /* bad byte count */
929 case MAX3421_HRSL_PIDERR: /* received PID is corrupted */
930 case MAX3421_HRSL_PKTERR: /* packet error (stuff, EOP) */
931 case MAX3421_HRSL_CRCERR: /* CRC error */
932 case MAX3421_HRSL_BABBLE: /* device talked too long */
933 case MAX3421_HRSL_TIMEOUT:
934 if (max3421_ep->retries++ < USB_MAX_RETRIES)
935 /* retry the packet again in the next frame */
936 max3421_slow_retransmit(hcd);
937 else {
938 /* Based on ohci.h cc_to_err[]: */
939 max3421_hcd->urb_done = hrsl_to_error[result_code];
940 dev_dbg(&spi->dev, "%s: unexpected error HRSL=0x%02x",
941 __func__, hrsl);
942 }
943 break;
944
945 case MAX3421_HRSL_STALL:
946 dev_dbg(&spi->dev, "%s: unexpected error HRSL=0x%02x",
947 __func__, hrsl);
948 max3421_hcd->urb_done = hrsl_to_error[result_code];
949 break;
950
951 case MAX3421_HRSL_NAK:
952 /*
953 * Device wasn't ready for data or has no data
954 * available: retry the packet again.
955 */
956 if (max3421_ep->naks++ < NAK_MAX_FAST_RETRANSMITS) {
957 max3421_next_transfer(hcd, 1);
958 switch_sndfifo = 0;
959 } else
960 max3421_slow_retransmit(hcd);
961 break;
962 }
963 if (switch_sndfifo)
964 spi_wr8(hcd, MAX3421_REG_SNDBC, 0);
965}
966
967/*
968 * Caller must NOT hold HCD spinlock.
969 */
970static int
971max3421_transfer_in_done(struct usb_hcd *hcd, struct urb *urb)
972{
973 struct spi_device *spi = to_spi_device(hcd->self.controller);
974 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
975 u32 max_packet;
976
977 if (urb->actual_length >= urb->transfer_buffer_length)
978 return 1; /* read is complete, so we're done */
979
980 /*
981 * USB 2.0 Section 5.3.2 Pipes: packets must be full size
982 * except for last one.
983 */
984 max_packet = usb_maxpacket(urb->dev, urb->pipe, 0);
985 if (max_packet > MAX3421_FIFO_SIZE) {
986 /*
987 * We do not support isochronous transfers at this
988 * time...
989 */
990 dev_err(&spi->dev,
991 "%s: packet-size of %u too big (limit is %u bytes)",
992 __func__, max_packet, MAX3421_FIFO_SIZE);
993 return -EINVAL;
994 }
995
996 if (max3421_hcd->curr_len < max_packet) {
997 if (urb->transfer_flags & URB_SHORT_NOT_OK) {
998 /*
999 * remaining > 0 and received an
1000 * unexpected partial packet ->
1001 * error
1002 */
1003 return -EREMOTEIO;
1004 } else
1005 /* short read, but it's OK */
1006 return 1;
1007 }
1008 return 0; /* not done */
1009}
1010
1011/*
1012 * Caller must NOT hold HCD spinlock.
1013 */
1014static int
1015max3421_transfer_out_done(struct usb_hcd *hcd, struct urb *urb)
1016{
1017 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1018
1019 urb->actual_length += max3421_hcd->curr_len;
1020 if (urb->actual_length < urb->transfer_buffer_length)
1021 return 0;
1022 if (urb->transfer_flags & URB_ZERO_PACKET) {
1023 /*
1024 * Some hardware needs a zero-size packet at the end
1025 * of a bulk-out transfer if the last transfer was a
1026 * full-sized packet (i.e., such hardware use <
1027 * max_packet as an indicator that the end of the
1028 * packet has been reached).
1029 */
1030 u32 max_packet = usb_maxpacket(urb->dev, urb->pipe, 1);
1031
1032 if (max3421_hcd->curr_len == max_packet)
1033 return 0;
1034 }
1035 return 1;
1036}
1037
1038/*
1039 * Caller must NOT hold HCD spinlock.
1040 */
1041static void
1042max3421_host_transfer_done(struct usb_hcd *hcd)
1043{
1044 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1045 struct urb *urb = max3421_hcd->curr_urb;
1046 struct max3421_ep *max3421_ep;
1047 u8 result_code, hrsl;
1048 int urb_done = 0;
1049
1050 max3421_hcd->hien &= ~(BIT(MAX3421_HI_HXFRDN_BIT) |
1051 BIT(MAX3421_HI_RCVDAV_BIT));
1052
1053 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
1054 result_code = hrsl & MAX3421_HRSL_RESULT_MASK;
1055
1056#ifdef DEBUG
1057 ++max3421_hcd->err_stat[result_code];
1058#endif
1059
1060 max3421_ep = urb->ep->hcpriv;
1061
1062 if (unlikely(result_code != MAX3421_HRSL_OK)) {
1063 max3421_handle_error(hcd, hrsl);
1064 return;
1065 }
1066
1067 max3421_ep->naks = 0;
1068 max3421_ep->retries = 0;
1069 switch (max3421_ep->pkt_state) {
1070
1071 case PKT_STATE_SETUP:
1072 if (urb->transfer_buffer_length > 0)
1073 max3421_ep->pkt_state = PKT_STATE_TRANSFER;
1074 else
1075 max3421_ep->pkt_state = PKT_STATE_TERMINATE;
1076 break;
1077
1078 case PKT_STATE_TRANSFER:
1079 if (usb_urb_dir_in(urb))
1080 urb_done = max3421_transfer_in_done(hcd, urb);
1081 else
1082 urb_done = max3421_transfer_out_done(hcd, urb);
1083 if (urb_done > 0 && usb_pipetype(urb->pipe) == PIPE_CONTROL) {
1084 /*
1085 * We aren't really done - we still need to
1086 * terminate the control transfer:
1087 */
1088 max3421_hcd->urb_done = urb_done = 0;
1089 max3421_ep->pkt_state = PKT_STATE_TERMINATE;
1090 }
1091 break;
1092
1093 case PKT_STATE_TERMINATE:
1094 urb_done = 1;
1095 break;
1096 }
1097
1098 if (urb_done)
1099 max3421_hcd->urb_done = urb_done;
1100 else
1101 max3421_next_transfer(hcd, 0);
1102}
1103
1104/*
1105 * Caller must NOT hold HCD spinlock.
1106 */
1107static void
1108max3421_detect_conn(struct usb_hcd *hcd)
1109{
1110 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1111 unsigned int jk, have_conn = 0;
1112 u32 old_port_status, chg;
1113 unsigned long flags;
1114 u8 hrsl, mode;
1115
1116 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
1117
1118 jk = ((((hrsl >> MAX3421_HRSL_JSTATUS_BIT) & 1) << 0) |
1119 (((hrsl >> MAX3421_HRSL_KSTATUS_BIT) & 1) << 1));
1120
1121 mode = max3421_hcd->mode;
1122
1123 switch (jk) {
1124 case 0x0: /* SE0: disconnect */
1125 /*
1126 * Turn off SOFKAENAB bit to avoid getting interrupt
1127 * every milli-second:
1128 */
1129 mode &= ~BIT(MAX3421_MODE_SOFKAENAB_BIT);
1130 break;
1131
1132 case 0x1: /* J=0,K=1: low-speed (in full-speed or vice versa) */
1133 case 0x2: /* J=1,K=0: full-speed (in full-speed or vice versa) */
1134 if (jk == 0x2)
1135 /* need to switch to the other speed: */
1136 mode ^= BIT(MAX3421_MODE_LOWSPEED_BIT);
1137 /* turn on SOFKAENAB bit: */
1138 mode |= BIT(MAX3421_MODE_SOFKAENAB_BIT);
1139 have_conn = 1;
1140 break;
1141
1142 case 0x3: /* illegal */
1143 break;
1144 }
1145
1146 max3421_hcd->mode = mode;
1147 spi_wr8(hcd, MAX3421_REG_MODE, max3421_hcd->mode);
1148
1149 spin_lock_irqsave(&max3421_hcd->lock, flags);
1150 old_port_status = max3421_hcd->port_status;
1151 if (have_conn)
1152 max3421_hcd->port_status |= USB_PORT_STAT_CONNECTION;
1153 else
1154 max3421_hcd->port_status &= ~USB_PORT_STAT_CONNECTION;
1155 if (mode & BIT(MAX3421_MODE_LOWSPEED_BIT))
1156 max3421_hcd->port_status |= USB_PORT_STAT_LOW_SPEED;
1157 else
1158 max3421_hcd->port_status &= ~USB_PORT_STAT_LOW_SPEED;
1159 chg = (old_port_status ^ max3421_hcd->port_status);
1160 max3421_hcd->port_status |= chg << 16;
1161 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
1162}
1163
1164static irqreturn_t
1165max3421_irq_handler(int irq, void *dev_id)
1166{
1167 struct usb_hcd *hcd = dev_id;
1168 struct spi_device *spi = to_spi_device(hcd->self.controller);
1169 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1170
1171 if (max3421_hcd->spi_thread &&
1172 max3421_hcd->spi_thread->state != TASK_RUNNING)
1173 wake_up_process(max3421_hcd->spi_thread);
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -06001174 if (!test_and_set_bit(ENABLE_IRQ, &max3421_hcd->todo))
David Mosberger2d531392014-04-28 22:14:07 -06001175 disable_irq_nosync(spi->irq);
David Mosberger2d531392014-04-28 22:14:07 -06001176 return IRQ_HANDLED;
1177}
1178
1179#ifdef DEBUG
1180
1181static void
1182dump_eps(struct usb_hcd *hcd)
1183{
1184 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1185 struct max3421_ep *max3421_ep;
1186 struct usb_host_endpoint *ep;
David Mosberger2d531392014-04-28 22:14:07 -06001187 char ubuf[512], *dp, *end;
1188 unsigned long flags;
1189 struct urb *urb;
1190 int epnum, ret;
1191
1192 spin_lock_irqsave(&max3421_hcd->lock, flags);
Geliang Tang553c2362015-12-20 00:11:50 +08001193 list_for_each_entry(max3421_ep, &max3421_hcd->ep_list, ep_list) {
David Mosberger2d531392014-04-28 22:14:07 -06001194 ep = max3421_ep->ep;
1195
1196 dp = ubuf;
1197 end = dp + sizeof(ubuf);
1198 *dp = '\0';
Geliang Tang553c2362015-12-20 00:11:50 +08001199 list_for_each_entry(urb, &ep->urb_list, urb_list) {
David Mosberger2d531392014-04-28 22:14:07 -06001200 ret = snprintf(dp, end - dp, " %p(%d.%s %d/%d)", urb,
1201 usb_pipetype(urb->pipe),
1202 usb_urb_dir_in(urb) ? "IN" : "OUT",
1203 urb->actual_length,
1204 urb->transfer_buffer_length);
1205 if (ret < 0 || ret >= end - dp)
1206 break; /* error or buffer full */
1207 dp += ret;
1208 }
1209
1210 epnum = usb_endpoint_num(&ep->desc);
1211 pr_info("EP%0u %u lst %04u rtr %u nak %6u rxmt %u: %s\n",
1212 epnum, max3421_ep->pkt_state, max3421_ep->last_active,
1213 max3421_ep->retries, max3421_ep->naks,
1214 max3421_ep->retransmit, ubuf);
1215 }
1216 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
1217}
1218
1219#endif /* DEBUG */
1220
1221/* Return zero if no work was performed, 1 otherwise. */
1222static int
1223max3421_handle_irqs(struct usb_hcd *hcd)
1224{
1225 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1226 u32 chg, old_port_status;
1227 unsigned long flags;
1228 u8 hirq;
1229
1230 /*
1231 * Read and ack pending interrupts (CPU must never
1232 * clear SNDBAV directly and RCVDAV must be cleared by
1233 * max3421_recv_data_available()!):
1234 */
1235 hirq = spi_rd8(hcd, MAX3421_REG_HIRQ);
1236 hirq &= max3421_hcd->hien;
1237 if (!hirq)
1238 return 0;
1239
1240 spi_wr8(hcd, MAX3421_REG_HIRQ,
1241 hirq & ~(BIT(MAX3421_HI_SNDBAV_BIT) |
1242 BIT(MAX3421_HI_RCVDAV_BIT)));
1243
1244 if (hirq & BIT(MAX3421_HI_FRAME_BIT)) {
1245 max3421_hcd->frame_number = ((max3421_hcd->frame_number + 1)
1246 & USB_MAX_FRAME_NUMBER);
1247 max3421_hcd->sched_pass = SCHED_PASS_PERIODIC;
1248 }
1249
1250 if (hirq & BIT(MAX3421_HI_RCVDAV_BIT))
1251 max3421_recv_data_available(hcd);
1252
1253 if (hirq & BIT(MAX3421_HI_HXFRDN_BIT))
1254 max3421_host_transfer_done(hcd);
1255
1256 if (hirq & BIT(MAX3421_HI_CONDET_BIT))
1257 max3421_detect_conn(hcd);
1258
1259 /*
1260 * Now process interrupts that may affect HCD state
1261 * other than the end-points:
1262 */
1263 spin_lock_irqsave(&max3421_hcd->lock, flags);
1264
1265 old_port_status = max3421_hcd->port_status;
1266 if (hirq & BIT(MAX3421_HI_BUSEVENT_BIT)) {
1267 if (max3421_hcd->port_status & USB_PORT_STAT_RESET) {
1268 /* BUSEVENT due to completion of Bus Reset */
1269 max3421_hcd->port_status &= ~USB_PORT_STAT_RESET;
1270 max3421_hcd->port_status |= USB_PORT_STAT_ENABLE;
1271 } else {
1272 /* BUSEVENT due to completion of Bus Resume */
1273 pr_info("%s: BUSEVENT Bus Resume Done\n", __func__);
1274 }
1275 }
1276 if (hirq & BIT(MAX3421_HI_RWU_BIT))
1277 pr_info("%s: RWU\n", __func__);
1278 if (hirq & BIT(MAX3421_HI_SUSDN_BIT))
1279 pr_info("%s: SUSDN\n", __func__);
1280
1281 chg = (old_port_status ^ max3421_hcd->port_status);
1282 max3421_hcd->port_status |= chg << 16;
1283
1284 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
1285
1286#ifdef DEBUG
1287 {
1288 static unsigned long last_time;
1289 char sbuf[16 * 16], *dp, *end;
1290 int i;
1291
Asaf Vertz788bfe82014-12-15 09:22:07 +02001292 if (time_after(jiffies, last_time + 5*HZ)) {
David Mosberger2d531392014-04-28 22:14:07 -06001293 dp = sbuf;
1294 end = sbuf + sizeof(sbuf);
1295 *dp = '\0';
1296 for (i = 0; i < 16; ++i) {
1297 int ret = snprintf(dp, end - dp, " %lu",
1298 max3421_hcd->err_stat[i]);
1299 if (ret < 0 || ret >= end - dp)
1300 break; /* error or buffer full */
1301 dp += ret;
1302 }
1303 pr_info("%s: hrsl_stats %s\n", __func__, sbuf);
1304 memset(max3421_hcd->err_stat, 0,
1305 sizeof(max3421_hcd->err_stat));
1306 last_time = jiffies;
1307
1308 dump_eps(hcd);
1309 }
1310 }
1311#endif
1312 return 1;
1313}
1314
1315static int
1316max3421_reset_hcd(struct usb_hcd *hcd)
1317{
1318 struct spi_device *spi = to_spi_device(hcd->self.controller);
1319 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1320 int timeout;
1321
1322 /* perform a chip reset and wait for OSCIRQ signal to appear: */
1323 spi_wr8(hcd, MAX3421_REG_USBCTL, BIT(MAX3421_USBCTL_CHIPRES_BIT));
1324 /* clear reset: */
1325 spi_wr8(hcd, MAX3421_REG_USBCTL, 0);
1326 timeout = 1000;
1327 while (1) {
1328 if (spi_rd8(hcd, MAX3421_REG_USBIRQ)
1329 & BIT(MAX3421_USBIRQ_OSCOKIRQ_BIT))
1330 break;
1331 if (--timeout < 0) {
1332 dev_err(&spi->dev,
1333 "timed out waiting for oscillator OK signal");
1334 return 1;
1335 }
1336 cond_resched();
1337 }
1338
1339 /*
1340 * Turn on host mode, automatic generation of SOF packets, and
1341 * enable pull-down registers on DM/DP:
1342 */
1343 max3421_hcd->mode = (BIT(MAX3421_MODE_HOST_BIT) |
1344 BIT(MAX3421_MODE_SOFKAENAB_BIT) |
1345 BIT(MAX3421_MODE_DMPULLDN_BIT) |
1346 BIT(MAX3421_MODE_DPPULLDN_BIT));
1347 spi_wr8(hcd, MAX3421_REG_MODE, max3421_hcd->mode);
1348
1349 /* reset frame-number: */
1350 max3421_hcd->frame_number = USB_MAX_FRAME_NUMBER;
1351 spi_wr8(hcd, MAX3421_REG_HCTL, BIT(MAX3421_HCTL_FRMRST_BIT));
1352
1353 /* sample the state of the D+ and D- lines */
1354 spi_wr8(hcd, MAX3421_REG_HCTL, BIT(MAX3421_HCTL_SAMPLEBUS_BIT));
1355 max3421_detect_conn(hcd);
1356
1357 /* enable frame, connection-detected, and bus-event interrupts: */
1358 max3421_hcd->hien = (BIT(MAX3421_HI_FRAME_BIT) |
1359 BIT(MAX3421_HI_CONDET_BIT) |
1360 BIT(MAX3421_HI_BUSEVENT_BIT));
1361 spi_wr8(hcd, MAX3421_REG_HIEN, max3421_hcd->hien);
1362
1363 /* enable interrupts: */
1364 spi_wr8(hcd, MAX3421_REG_CPUCTL, BIT(MAX3421_CPUCTL_IE_BIT));
1365 return 1;
1366}
1367
1368static int
1369max3421_urb_done(struct usb_hcd *hcd)
1370{
1371 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1372 unsigned long flags;
1373 struct urb *urb;
1374 int status;
1375
1376 status = max3421_hcd->urb_done;
1377 max3421_hcd->urb_done = 0;
1378 if (status > 0)
1379 status = 0;
1380 urb = max3421_hcd->curr_urb;
1381 if (urb) {
1382 max3421_hcd->curr_urb = NULL;
1383 spin_lock_irqsave(&max3421_hcd->lock, flags);
1384 usb_hcd_unlink_urb_from_ep(hcd, urb);
1385 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
1386
1387 /* must be called without the HCD spinlock: */
1388 usb_hcd_giveback_urb(hcd, urb, status);
1389 }
1390 return 1;
1391}
1392
1393static int
1394max3421_spi_thread(void *dev_id)
1395{
1396 struct usb_hcd *hcd = dev_id;
1397 struct spi_device *spi = to_spi_device(hcd->self.controller);
1398 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1399 int i, i_worked = 1;
1400
1401 /* set full-duplex SPI mode, low-active interrupt pin: */
1402 spi_wr8(hcd, MAX3421_REG_PINCTL,
1403 (BIT(MAX3421_PINCTL_FDUPSPI_BIT) | /* full-duplex */
1404 BIT(MAX3421_PINCTL_INTLEVEL_BIT))); /* low-active irq */
1405
1406 while (!kthread_should_stop()) {
1407 max3421_hcd->rev = spi_rd8(hcd, MAX3421_REG_REVISION);
1408 if (max3421_hcd->rev == 0x12 || max3421_hcd->rev == 0x13)
1409 break;
1410 dev_err(&spi->dev, "bad rev 0x%02x", max3421_hcd->rev);
1411 msleep(10000);
1412 }
1413 dev_info(&spi->dev, "rev 0x%x, SPI clk %dHz, bpw %u, irq %d\n",
1414 max3421_hcd->rev, spi->max_speed_hz, spi->bits_per_word,
1415 spi->irq);
1416
1417 while (!kthread_should_stop()) {
1418 if (!i_worked) {
1419 /*
1420 * We'll be waiting for wakeups from the hard
1421 * interrupt handler, so now is a good time to
1422 * sync our hien with the chip:
1423 */
1424 spi_wr8(hcd, MAX3421_REG_HIEN, max3421_hcd->hien);
1425
1426 set_current_state(TASK_INTERRUPTIBLE);
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -06001427 if (test_and_clear_bit(ENABLE_IRQ, &max3421_hcd->todo))
David Mosberger2d531392014-04-28 22:14:07 -06001428 enable_irq(spi->irq);
David Mosberger2d531392014-04-28 22:14:07 -06001429 schedule();
1430 __set_current_state(TASK_RUNNING);
1431 }
1432
1433 i_worked = 0;
1434
1435 if (max3421_hcd->urb_done)
1436 i_worked |= max3421_urb_done(hcd);
1437 else if (max3421_handle_irqs(hcd))
1438 i_worked = 1;
1439 else if (!max3421_hcd->curr_urb)
1440 i_worked |= max3421_select_and_start_urb(hcd);
1441
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -06001442 if (test_and_clear_bit(RESET_HCD, &max3421_hcd->todo))
David Mosberger2d531392014-04-28 22:14:07 -06001443 /* reset the HCD: */
David Mosberger2d531392014-04-28 22:14:07 -06001444 i_worked |= max3421_reset_hcd(hcd);
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -06001445 if (test_and_clear_bit(RESET_PORT, &max3421_hcd->todo)) {
David Mosberger2d531392014-04-28 22:14:07 -06001446 /* perform a USB bus reset: */
David Mosberger2d531392014-04-28 22:14:07 -06001447 spi_wr8(hcd, MAX3421_REG_HCTL,
1448 BIT(MAX3421_HCTL_BUSRST_BIT));
1449 i_worked = 1;
1450 }
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -06001451 if (test_and_clear_bit(CHECK_UNLINK, &max3421_hcd->todo))
David Mosberger2d531392014-04-28 22:14:07 -06001452 i_worked |= max3421_check_unlink(hcd);
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -06001453 if (test_and_clear_bit(IOPIN_UPDATE, &max3421_hcd->todo)) {
David Mosberger2d531392014-04-28 22:14:07 -06001454 /*
1455 * IOPINS1/IOPINS2 do not auto-increment, so we can't
1456 * use spi_wr_buf().
1457 */
1458 for (i = 0; i < ARRAY_SIZE(max3421_hcd->iopins); ++i) {
1459 u8 val = spi_rd8(hcd, MAX3421_REG_IOPINS1);
1460
1461 val = ((val & 0xf0) |
1462 (max3421_hcd->iopins[i] & 0x0f));
1463 spi_wr8(hcd, MAX3421_REG_IOPINS1 + i, val);
1464 max3421_hcd->iopins[i] = val;
1465 }
David Mosberger2d531392014-04-28 22:14:07 -06001466 i_worked = 1;
1467 }
1468 }
1469 set_current_state(TASK_RUNNING);
1470 dev_info(&spi->dev, "SPI thread exiting");
1471 return 0;
1472}
1473
1474static int
1475max3421_reset_port(struct usb_hcd *hcd)
1476{
1477 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1478
1479 max3421_hcd->port_status &= ~(USB_PORT_STAT_ENABLE |
1480 USB_PORT_STAT_LOW_SPEED);
David Mosberger-Tanga2b63cb2014-06-19 12:57:28 -06001481 max3421_hcd->port_status |= USB_PORT_STAT_RESET;
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -06001482 set_bit(RESET_PORT, &max3421_hcd->todo);
David Mosberger2d531392014-04-28 22:14:07 -06001483 wake_up_process(max3421_hcd->spi_thread);
1484 return 0;
1485}
1486
1487static int
1488max3421_reset(struct usb_hcd *hcd)
1489{
1490 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1491
1492 hcd->self.sg_tablesize = 0;
1493 hcd->speed = HCD_USB2;
1494 hcd->self.root_hub->speed = USB_SPEED_FULL;
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -06001495 set_bit(RESET_HCD, &max3421_hcd->todo);
David Mosberger2d531392014-04-28 22:14:07 -06001496 wake_up_process(max3421_hcd->spi_thread);
1497 return 0;
1498}
1499
1500static int
1501max3421_start(struct usb_hcd *hcd)
1502{
1503 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1504
1505 spin_lock_init(&max3421_hcd->lock);
1506 max3421_hcd->rh_state = MAX3421_RH_RUNNING;
1507
1508 INIT_LIST_HEAD(&max3421_hcd->ep_list);
1509
1510 hcd->power_budget = POWER_BUDGET;
1511 hcd->state = HC_STATE_RUNNING;
1512 hcd->uses_new_polling = 1;
1513 return 0;
1514}
1515
1516static void
1517max3421_stop(struct usb_hcd *hcd)
1518{
1519}
1520
1521static int
1522max3421_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1523{
1524 struct spi_device *spi = to_spi_device(hcd->self.controller);
1525 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1526 struct max3421_ep *max3421_ep;
1527 unsigned long flags;
1528 int retval;
1529
1530 switch (usb_pipetype(urb->pipe)) {
1531 case PIPE_INTERRUPT:
1532 case PIPE_ISOCHRONOUS:
1533 if (urb->interval < 0) {
1534 dev_err(&spi->dev,
1535 "%s: interval=%d for intr-/iso-pipe; expected > 0\n",
1536 __func__, urb->interval);
1537 return -EINVAL;
1538 }
1539 default:
1540 break;
1541 }
1542
1543 spin_lock_irqsave(&max3421_hcd->lock, flags);
1544
1545 max3421_ep = urb->ep->hcpriv;
1546 if (!max3421_ep) {
1547 /* gets freed in max3421_endpoint_disable: */
Alexey Khoroshilov6c0f3692014-06-19 23:44:57 +04001548 max3421_ep = kzalloc(sizeof(struct max3421_ep), GFP_ATOMIC);
David Mosberger-Tang00c5aa12014-05-28 16:09:16 -06001549 if (!max3421_ep) {
1550 retval = -ENOMEM;
1551 goto out;
1552 }
David Mosberger2d531392014-04-28 22:14:07 -06001553 max3421_ep->ep = urb->ep;
1554 max3421_ep->last_active = max3421_hcd->frame_number;
1555 urb->ep->hcpriv = max3421_ep;
1556
1557 list_add_tail(&max3421_ep->ep_list, &max3421_hcd->ep_list);
1558 }
1559
1560 retval = usb_hcd_link_urb_to_ep(hcd, urb);
1561 if (retval == 0) {
1562 /* Since we added to the queue, restart scheduling: */
1563 max3421_hcd->sched_pass = SCHED_PASS_PERIODIC;
1564 wake_up_process(max3421_hcd->spi_thread);
1565 }
1566
David Mosberger-Tang00c5aa12014-05-28 16:09:16 -06001567out:
David Mosberger2d531392014-04-28 22:14:07 -06001568 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
1569 return retval;
1570}
1571
1572static int
1573max3421_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1574{
1575 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1576 unsigned long flags;
1577 int retval;
1578
1579 spin_lock_irqsave(&max3421_hcd->lock, flags);
1580
1581 /*
1582 * This will set urb->unlinked which in turn causes the entry
1583 * to be dropped at the next opportunity.
1584 */
1585 retval = usb_hcd_check_unlink_urb(hcd, urb, status);
1586 if (retval == 0) {
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -06001587 set_bit(CHECK_UNLINK, &max3421_hcd->todo);
David Mosberger2d531392014-04-28 22:14:07 -06001588 wake_up_process(max3421_hcd->spi_thread);
1589 }
1590 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
1591 return retval;
1592}
1593
1594static void
1595max3421_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
1596{
1597 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1598 unsigned long flags;
1599
1600 spin_lock_irqsave(&max3421_hcd->lock, flags);
1601
1602 if (ep->hcpriv) {
1603 struct max3421_ep *max3421_ep = ep->hcpriv;
1604
1605 /* remove myself from the ep_list: */
1606 if (!list_empty(&max3421_ep->ep_list))
1607 list_del(&max3421_ep->ep_list);
1608 kfree(max3421_ep);
1609 ep->hcpriv = NULL;
1610 }
1611
1612 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
1613}
1614
1615static int
1616max3421_get_frame_number(struct usb_hcd *hcd)
1617{
1618 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1619 return max3421_hcd->frame_number;
1620}
1621
1622/*
1623 * Should return a non-zero value when any port is undergoing a resume
1624 * transition while the root hub is suspended.
1625 */
1626static int
1627max3421_hub_status_data(struct usb_hcd *hcd, char *buf)
1628{
1629 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1630 unsigned long flags;
1631 int retval = 0;
1632
1633 spin_lock_irqsave(&max3421_hcd->lock, flags);
1634 if (!HCD_HW_ACCESSIBLE(hcd))
1635 goto done;
1636
1637 *buf = 0;
1638 if ((max3421_hcd->port_status & PORT_C_MASK) != 0) {
1639 *buf = (1 << 1); /* a hub over-current condition exists */
1640 dev_dbg(hcd->self.controller,
1641 "port status 0x%08x has changes\n",
1642 max3421_hcd->port_status);
1643 retval = 1;
1644 if (max3421_hcd->rh_state == MAX3421_RH_SUSPENDED)
1645 usb_hcd_resume_root_hub(hcd);
1646 }
1647done:
1648 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
1649 return retval;
1650}
1651
1652static inline void
1653hub_descriptor(struct usb_hub_descriptor *desc)
1654{
1655 memset(desc, 0, sizeof(*desc));
1656 /*
1657 * See Table 11-13: Hub Descriptor in USB 2.0 spec.
1658 */
Sergei Shtylyove3d02e02015-03-29 01:14:03 +03001659 desc->bDescriptorType = USB_DT_HUB; /* hub descriptor */
David Mosberger2d531392014-04-28 22:14:07 -06001660 desc->bDescLength = 9;
Sergei Shtylyov2e48c462015-01-19 01:38:22 +03001661 desc->wHubCharacteristics = cpu_to_le16(HUB_CHAR_INDV_PORT_LPSM |
1662 HUB_CHAR_COMMON_OCPM);
David Mosberger2d531392014-04-28 22:14:07 -06001663 desc->bNbrPorts = 1;
1664}
1665
1666/*
1667 * Set the MAX3421E general-purpose output with number PIN_NUMBER to
1668 * VALUE (0 or 1). PIN_NUMBER may be in the range from 1-8. For
1669 * any other value, this function acts as a no-op.
1670 */
1671static void
1672max3421_gpout_set_value(struct usb_hcd *hcd, u8 pin_number, u8 value)
1673{
1674 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1675 u8 mask, idx;
1676
1677 --pin_number;
Jules Maselbas721fdc82017-09-15 18:58:45 +02001678 if (pin_number >= MAX3421_GPOUT_COUNT)
David Mosberger2d531392014-04-28 22:14:07 -06001679 return;
1680
Jaewon Kim59b71f72016-07-21 22:20:53 +09001681 mask = 1u << (pin_number % 4);
David Mosberger2d531392014-04-28 22:14:07 -06001682 idx = pin_number / 4;
1683
1684 if (value)
1685 max3421_hcd->iopins[idx] |= mask;
1686 else
1687 max3421_hcd->iopins[idx] &= ~mask;
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -06001688 set_bit(IOPIN_UPDATE, &max3421_hcd->todo);
David Mosberger2d531392014-04-28 22:14:07 -06001689 wake_up_process(max3421_hcd->spi_thread);
1690}
1691
static int
max3421_hub_control(struct usb_hcd *hcd, u16 type_req, u16 value, u16 index,
                    char *buf, u16 length)
{
        struct spi_device *spi = to_spi_device(hcd->self.controller);
        struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
        struct max3421_hcd_platform_data *pdata;
        unsigned long flags;
        int retval = 0;

        pdata = spi->dev.platform_data;

        spin_lock_irqsave(&max3421_hcd->lock, flags);

        switch (type_req) {
        case ClearHubFeature:
                break;
        case ClearPortFeature:
                switch (value) {
                case USB_PORT_FEAT_SUSPEND:
                        break;
                case USB_PORT_FEAT_POWER:
                        dev_dbg(hcd->self.controller, "power-off\n");
                        max3421_gpout_set_value(hcd, pdata->vbus_gpout,
                                                !pdata->vbus_active_level);
                        /* FALLS THROUGH */
                default:
                        max3421_hcd->port_status &= ~(1 << value);
                }
                break;
        case GetHubDescriptor:
                hub_descriptor((struct usb_hub_descriptor *) buf);
                break;

        case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
        case GetPortErrorCount:
        case SetHubDepth:
                /* USB3 only */
                goto error;

        case GetHubStatus:
                *(__le32 *) buf = cpu_to_le32(0);
                break;

        case GetPortStatus:
                if (index != 1) {
                        retval = -EPIPE;
                        goto error;
                }
                ((__le16 *) buf)[0] = cpu_to_le16(max3421_hcd->port_status);
                ((__le16 *) buf)[1] =
                        cpu_to_le16(max3421_hcd->port_status >> 16);
                break;

        case SetHubFeature:
                retval = -EPIPE;
                break;

        case SetPortFeature:
                switch (value) {
                case USB_PORT_FEAT_LINK_STATE:
                case USB_PORT_FEAT_U1_TIMEOUT:
                case USB_PORT_FEAT_U2_TIMEOUT:
                case USB_PORT_FEAT_BH_PORT_RESET:
                        goto error;
                case USB_PORT_FEAT_SUSPEND:
                        if (max3421_hcd->active)
                                max3421_hcd->port_status |=
                                        USB_PORT_STAT_SUSPEND;
                        break;
                case USB_PORT_FEAT_POWER:
                        dev_dbg(hcd->self.controller, "power-on\n");
                        max3421_hcd->port_status |= USB_PORT_STAT_POWER;
                        max3421_gpout_set_value(hcd, pdata->vbus_gpout,
                                                pdata->vbus_active_level);
                        break;
                case USB_PORT_FEAT_RESET:
                        max3421_reset_port(hcd);
                        /* FALLS THROUGH */
                default:
                        if ((max3421_hcd->port_status & USB_PORT_STAT_POWER)
                            != 0)
                                max3421_hcd->port_status |= (1 << value);
                }
                break;

        default:
                dev_dbg(hcd->self.controller,
                        "hub control req%04x v%04x i%04x l%d\n",
                        type_req, value, index, length);
error:          /* "protocol stall" on error */
                retval = -EPIPE;
        }

        spin_unlock_irqrestore(&max3421_hcd->lock, flags);
        return retval;
}

static int
max3421_bus_suspend(struct usb_hcd *hcd)
{
        return -1;
}

static int
max3421_bus_resume(struct usb_hcd *hcd)
{
        return -1;
}

/*
 * The SPI driver already takes care of DMA-mapping/unmapping, so no
 * reason to do it twice.
 */
static int
max3421_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
        return 0;
}

static void
max3421_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
}

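/*
 * HCD operations.  The MAX3421E is exposed to the USB core as a
 * full-/low-speed (USB 1.1) host with a single root-hub port; all
 * register and FIFO traffic is funneled through max3421_spi_thread.
 */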
static const struct hc_driver max3421_hcd_desc = {
        .description =          "max3421",
        .product_desc =         DRIVER_DESC,
        .hcd_priv_size =        sizeof(struct max3421_hcd),
        .flags =                HCD_USB11,
        .reset =                max3421_reset,
        .start =                max3421_start,
        .stop =                 max3421_stop,
        .get_frame_number =     max3421_get_frame_number,
        .urb_enqueue =          max3421_urb_enqueue,
        .urb_dequeue =          max3421_urb_dequeue,
        .map_urb_for_dma =      max3421_map_urb_for_dma,
        .unmap_urb_for_dma =    max3421_unmap_urb_for_dma,
        .endpoint_disable =     max3421_endpoint_disable,
        .hub_status_data =      max3421_hub_status_data,
        .hub_control =          max3421_hub_control,
        .bus_suspend =          max3421_bus_suspend,
        .bus_resume =           max3421_bus_resume,
};

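/*
 * Parse the "maxim,vbus-en-pin" device-tree property: a <GPOUT-number
 * active-level> pair that selects which GPOUT pin (1-8) drives VBUS
 * and whether it is active-high (1) or active-low (0).  A minimal,
 * illustrative node fragment (node name and reg value are placeholders,
 * not mandated by this file):
 *
 *	usb@0 {
 *		compatible = "maxim,max3421";
 *		reg = <0>;
 *		maxim,vbus-en-pin = <3 1>;
 *	};
 */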
static int
max3421_of_vbus_en_pin(struct device *dev, struct max3421_hcd_platform_data *pdata)
{
        int retval;
        uint32_t value[2];

        if (!pdata)
                return -EINVAL;

        retval = of_property_read_u32_array(dev->of_node, "maxim,vbus-en-pin", value, 2);
        if (retval) {
                dev_err(dev, "device tree node property 'maxim,vbus-en-pin' is missing\n");
                return retval;
        }
        dev_info(dev, "property 'maxim,vbus-en-pin' value is <%d %d>\n", value[0], value[1]);

        pdata->vbus_gpout = value[0];
        pdata->vbus_active_level = value[1];

        return 0;
}

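/*
 * Bind to the SPI device: validate the SPI setup and interrupt line,
 * obtain the VBUS configuration from platform data (or, with OF, from
 * the device tree), allocate the HCD and the SPI transfer buffers,
 * start max3421_spi_thread, register the HCD and finally request the
 * chip's level-triggered interrupt.
 */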
static int
max3421_probe(struct spi_device *spi)
{
        struct device *dev = &spi->dev;
        struct max3421_hcd *max3421_hcd;
        struct usb_hcd *hcd = NULL;
        struct max3421_hcd_platform_data *pdata = NULL;
        int retval = -ENOMEM;

        if (spi_setup(spi) < 0) {
                dev_err(&spi->dev, "Unable to setup SPI bus\n");
                return -EFAULT;
        }

        if (!spi->irq) {
                dev_err(dev, "Failed to get SPI IRQ\n");
                return -EFAULT;
        }

        if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
                pdata = devm_kzalloc(&spi->dev, sizeof(*pdata), GFP_KERNEL);
                if (!pdata) {
                        retval = -ENOMEM;
                        goto error;
                }
                retval = max3421_of_vbus_en_pin(dev, pdata);
                if (retval)
                        goto error;

                spi->dev.platform_data = pdata;
        }

        pdata = spi->dev.platform_data;
        if (!pdata) {
                dev_err(&spi->dev, "driver configuration data is not provided\n");
                retval = -EFAULT;
                goto error;
        }
        if (pdata->vbus_active_level > 1) {
                dev_err(&spi->dev, "vbus active level value %d is out of range (0/1)\n", pdata->vbus_active_level);
                retval = -EINVAL;
                goto error;
        }
        if (pdata->vbus_gpout < 1 || pdata->vbus_gpout > MAX3421_GPOUT_COUNT) {
                dev_err(&spi->dev, "vbus gpout value %d is out of range (1..8)\n", pdata->vbus_gpout);
                retval = -EINVAL;
                goto error;
        }

        hcd = usb_create_hcd(&max3421_hcd_desc, &spi->dev,
                             dev_name(&spi->dev));
        if (!hcd) {
                dev_err(&spi->dev, "failed to create HCD structure\n");
                goto error;
        }
        set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
        max3421_hcd = hcd_to_max3421(hcd);
        max3421_hcd->next = max3421_hcd_list;
        max3421_hcd_list = max3421_hcd;
        INIT_LIST_HEAD(&max3421_hcd->ep_list);

        max3421_hcd->tx = kmalloc(sizeof(*max3421_hcd->tx), GFP_KERNEL);
        if (!max3421_hcd->tx)
                goto error;
        max3421_hcd->rx = kmalloc(sizeof(*max3421_hcd->rx), GFP_KERNEL);
        if (!max3421_hcd->rx)
                goto error;

        max3421_hcd->spi_thread = kthread_run(max3421_spi_thread, hcd,
                                              "max3421_spi_thread");
        if (IS_ERR(max3421_hcd->spi_thread)) {
                retval = PTR_ERR(max3421_hcd->spi_thread);
                dev_err(&spi->dev, "failed to create SPI thread\n");
                goto error;
        }

        retval = usb_add_hcd(hcd, 0, 0);
        if (retval) {
                dev_err(&spi->dev, "failed to add HCD\n");
                goto error;
        }

        retval = request_irq(spi->irq, max3421_irq_handler,
                             IRQF_TRIGGER_LOW, "max3421", hcd);
        if (retval < 0) {
                dev_err(&spi->dev, "failed to request irq %d\n", spi->irq);
                goto error;
        }
        return 0;

error:
        if (IS_ENABLED(CONFIG_OF) && dev->of_node && pdata) {
                devm_kfree(&spi->dev, pdata);
                spi->dev.platform_data = NULL;
        }

        if (hcd) {
                kfree(max3421_hcd->tx);
                kfree(max3421_hcd->rx);
                if (!IS_ERR_OR_NULL(max3421_hcd->spi_thread))
                        kthread_stop(max3421_hcd->spi_thread);
                usb_put_hcd(hcd);
        }
        return retval;
}

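/*
 * Unbind from the SPI device: look up the HCD on max3421_hcd_list,
 * unregister it, stop the SPI thread, unlink the list entry and
 * release the interrupt before dropping the final HCD reference.
 */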
static int
max3421_remove(struct spi_device *spi)
{
        struct max3421_hcd *max3421_hcd = NULL, **prev;
        struct usb_hcd *hcd = NULL;
        unsigned long flags;

        for (prev = &max3421_hcd_list; *prev; prev = &(*prev)->next) {
                max3421_hcd = *prev;
                hcd = max3421_to_hcd(max3421_hcd);
                if (hcd->self.controller == &spi->dev)
                        break;
        }
        if (!*prev) {
                dev_err(&spi->dev, "no MAX3421 HCD found for SPI device %p\n",
                        spi);
                return -ENODEV;
        }

        usb_remove_hcd(hcd);

        spin_lock_irqsave(&max3421_hcd->lock, flags);

        kthread_stop(max3421_hcd->spi_thread);
        *prev = max3421_hcd->next;

        spin_unlock_irqrestore(&max3421_hcd->lock, flags);

        free_irq(spi->irq, hcd);

        usb_put_hcd(hcd);
        return 0;
}

static const struct of_device_id max3421_of_match_table[] = {
        { .compatible = "maxim,max3421", },
        {},
};
MODULE_DEVICE_TABLE(of, max3421_of_match_table);

static struct spi_driver max3421_driver = {
        .probe =        max3421_probe,
        .remove =       max3421_remove,
        .driver = {
                .name =  "max3421-hcd",
                .of_match_table = of_match_ptr(max3421_of_match_table),
        },
};

module_spi_driver(max3421_driver);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("David Mosberger <davidm@egauge.net>");
MODULE_LICENSE("GPL");