// SPDX-License-Identifier: GPL-2.0
/*
 * MAX3421 Host Controller driver for USB.
 *
 * Author: David Mosberger-Tang <davidm@egauge.net>
 *
 * (C) Copyright 2014 David Mosberger-Tang <davidm@egauge.net>
 *
 * MAX3421 is a chip implementing a USB 2.0 Full-/Low-Speed host
 * controller on a SPI bus.
 *
 * Based on:
 *	o MAX3421E datasheet
 *	  https://datasheets.maximintegrated.com/en/ds/MAX3421E.pdf
 *	o MAX3421E Programming Guide
 *	  https://www.hdl.co.jp/ftpdata/utl-001/AN3785.pdf
 *	o gadget/dummy_hcd.c
 *	  For USB HCD implementation.
 *	o Arduino MAX3421 driver
 *	  https://github.com/felis/USB_Host_Shield_2.0/blob/master/Usb.cpp
 *
 * This file is licensed under the GPL v2.
 *
 * Important note on worst-case (full-speed) packet size constraints
 * (See USB 2.0 Section 5.6.3 and following):
 *
 *	- control:	64 bytes
 *	- isochronous:	1023 bytes
 *	- interrupt:	64 bytes
 *	- bulk:		64 bytes
 *
 * Since the MAX3421 FIFO size is 64 bytes, we do not have to worry about
 * multi-FIFO writes/reads for a single USB packet *except* for isochronous
 * transfers.  We don't support isochronous transfers at this time, so we
 * just assume that a USB packet always fits into a single FIFO buffer.
 *
 * NOTE: The June 2006 version of the "MAX3421E Programming Guide"
 * (AN3785) has conflicting info for the RCVDAVIRQ bit:
 *
 *	The description of RCVDAVIRQ says "The CPU *must* clear
 *	this IRQ bit (by writing a 1 to it) before reading the
 *	RCVFIFO data."
 *
 * However, the earlier section on "Programming BULK-IN
 * Transfers" says that:
 *
 *	After the CPU retrieves the data, it clears the
 *	RCVDAVIRQ bit.
 *
 * The December 2006 version has been corrected and it consistently
 * states that the second behavior is the correct one.
 *
 * Synchronous SPI transactions sleep so we can't perform any such
 * transactions while holding a spin-lock (and/or while interrupts are
 * masked).  To achieve this, all SPI transactions are issued from a
 * single thread (max3421_spi_thread).
 */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/of.h>

#include <linux/platform_data/max3421-hcd.h>

#define DRIVER_DESC	"MAX3421 USB Host-Controller Driver"
#define DRIVER_VERSION	"1.0"

/* 11-bit counter that wraps around (USB 2.0 Section 8.3.3): */
#define USB_MAX_FRAME_NUMBER	0x7ff
#define USB_MAX_RETRIES		3 /* # of retries before error is reported */

/*
 * Max. # of times we're willing to retransmit a request immediately in
 * response to a NAK.  Afterwards, we fall back on trying once a frame.
 */
#define NAK_MAX_FAST_RETRANSMITS	2

#define POWER_BUDGET	500	/* in mA; use 8 for low-power port testing */

/* Port-change mask: */
#define PORT_C_MASK	((USB_PORT_STAT_C_CONNECTION |	\
			  USB_PORT_STAT_C_ENABLE |	\
			  USB_PORT_STAT_C_SUSPEND |	\
			  USB_PORT_STAT_C_OVERCURRENT | \
			  USB_PORT_STAT_C_RESET) << 16)

#define MAX3421_GPOUT_COUNT	8

enum max3421_rh_state {
	MAX3421_RH_RESET,
	MAX3421_RH_SUSPENDED,
	MAX3421_RH_RUNNING
};

enum pkt_state {
	PKT_STATE_SETUP,	/* waiting to send setup packet to ctrl pipe */
	PKT_STATE_TRANSFER,	/* waiting to xfer transfer_buffer */
	PKT_STATE_TERMINATE	/* waiting to terminate control transfer */
};

enum scheduling_pass {
	SCHED_PASS_PERIODIC,
	SCHED_PASS_NON_PERIODIC,
	SCHED_PASS_DONE
};

/* Bit numbers for max3421_hcd->todo: */
enum {
	ENABLE_IRQ = 0,
	RESET_HCD,
	RESET_PORT,
	CHECK_UNLINK,
	IOPIN_UPDATE
};

struct max3421_dma_buf {
	u8 data[2];
};

struct max3421_hcd {
	spinlock_t lock;

	struct task_struct *spi_thread;

	struct max3421_hcd *next;

	enum max3421_rh_state rh_state;
	/* lower 16 bits contain port status, upper 16 bits the change mask: */
	u32 port_status;

	unsigned active:1;

	struct list_head ep_list;	/* list of EP's with work */

	/*
	 * The following are owned by spi_thread (i.e., they may be
	 * accessed by the SPI thread without acquiring the HCD lock):
	 */
	u8 rev;				/* chip revision */
	u16 frame_number;
	/*
	 * kmalloc'd buffers guaranteed to be in separate (DMA)
	 * cache-lines:
	 */
	struct max3421_dma_buf *tx;
	struct max3421_dma_buf *rx;
	/*
	 * URB we're currently processing.  Must not be reset to NULL
	 * unless MAX3421E chip is idle:
	 */
	struct urb *curr_urb;
	enum scheduling_pass sched_pass;
	struct usb_device *loaded_dev;	/* dev that's loaded into the chip */
	int loaded_epnum;		/* epnum whose toggles are loaded */
	int urb_done;			/* > 0 -> no errors, < 0: errno */
	size_t curr_len;
	u8 hien;
	u8 mode;
	u8 iopins[2];
	unsigned long todo;
#ifdef DEBUG
	unsigned long err_stat[16];
#endif
};

struct max3421_ep {
	struct usb_host_endpoint *ep;
	struct list_head ep_list;
	u32 naks;
	u16 last_active;		/* frame # this ep was last active */
	enum pkt_state pkt_state;
	u8 retries;
	u8 retransmit;			/* packet needs retransmission */
};

static struct max3421_hcd *max3421_hcd_list;

#define MAX3421_FIFO_SIZE	64

#define MAX3421_SPI_DIR_RD	0	/* read register from MAX3421 */
#define MAX3421_SPI_DIR_WR	1	/* write register to MAX3421 */

/* SPI commands: */
#define MAX3421_SPI_DIR_SHIFT	1
#define MAX3421_SPI_REG_SHIFT	3

#define MAX3421_REG_RCVFIFO	1
#define MAX3421_REG_SNDFIFO	2
#define MAX3421_REG_SUDFIFO	4
#define MAX3421_REG_RCVBC	6
#define MAX3421_REG_SNDBC	7
#define MAX3421_REG_USBIRQ	13
#define MAX3421_REG_USBIEN	14
#define MAX3421_REG_USBCTL	15
#define MAX3421_REG_CPUCTL	16
#define MAX3421_REG_PINCTL	17
#define MAX3421_REG_REVISION	18
#define MAX3421_REG_IOPINS1	20
#define MAX3421_REG_IOPINS2	21
#define MAX3421_REG_GPINIRQ	22
#define MAX3421_REG_GPINIEN	23
#define MAX3421_REG_GPINPOL	24
#define MAX3421_REG_HIRQ	25
#define MAX3421_REG_HIEN	26
#define MAX3421_REG_MODE	27
#define MAX3421_REG_PERADDR	28
#define MAX3421_REG_HCTL	29
#define MAX3421_REG_HXFR	30
#define MAX3421_REG_HRSL	31

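/*
 * Illustration (not used by the code): the first byte of every SPI
 * transfer encodes the register number and transfer direction using the
 * shifts above; see spi_rd8()/spi_wr8() below.  For example, a read of
 * HRSL sends (31 << MAX3421_SPI_REG_SHIFT) |
 * (MAX3421_SPI_DIR_RD << MAX3421_SPI_DIR_SHIFT) = 0xf8, while a write
 * to HCTL sends (29 << 3) | (1 << 1) = 0xea.
 */
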
214enum {
215 MAX3421_USBIRQ_OSCOKIRQ_BIT = 0,
216 MAX3421_USBIRQ_NOVBUSIRQ_BIT = 5,
217 MAX3421_USBIRQ_VBUSIRQ_BIT
218};
219
220enum {
221 MAX3421_CPUCTL_IE_BIT = 0,
222 MAX3421_CPUCTL_PULSEWID0_BIT = 6,
223 MAX3421_CPUCTL_PULSEWID1_BIT
224};
225
226enum {
227 MAX3421_USBCTL_PWRDOWN_BIT = 4,
228 MAX3421_USBCTL_CHIPRES_BIT
229};
230
231enum {
232 MAX3421_PINCTL_GPXA_BIT = 0,
233 MAX3421_PINCTL_GPXB_BIT,
234 MAX3421_PINCTL_POSINT_BIT,
235 MAX3421_PINCTL_INTLEVEL_BIT,
236 MAX3421_PINCTL_FDUPSPI_BIT,
237 MAX3421_PINCTL_EP0INAK_BIT,
238 MAX3421_PINCTL_EP2INAK_BIT,
239 MAX3421_PINCTL_EP3INAK_BIT,
240};
241
242enum {
243 MAX3421_HI_BUSEVENT_BIT = 0, /* bus-reset/-resume */
244 MAX3421_HI_RWU_BIT, /* remote wakeup */
245 MAX3421_HI_RCVDAV_BIT, /* receive FIFO data available */
246 MAX3421_HI_SNDBAV_BIT, /* send buffer available */
247 MAX3421_HI_SUSDN_BIT, /* suspend operation done */
248 MAX3421_HI_CONDET_BIT, /* peripheral connect/disconnect */
249 MAX3421_HI_FRAME_BIT, /* frame generator */
250 MAX3421_HI_HXFRDN_BIT, /* host transfer done */
251};
252
253enum {
254 MAX3421_HCTL_BUSRST_BIT = 0,
255 MAX3421_HCTL_FRMRST_BIT,
256 MAX3421_HCTL_SAMPLEBUS_BIT,
257 MAX3421_HCTL_SIGRSM_BIT,
258 MAX3421_HCTL_RCVTOG0_BIT,
259 MAX3421_HCTL_RCVTOG1_BIT,
260 MAX3421_HCTL_SNDTOG0_BIT,
261 MAX3421_HCTL_SNDTOG1_BIT
262};
263
264enum {
265 MAX3421_MODE_HOST_BIT = 0,
266 MAX3421_MODE_LOWSPEED_BIT,
267 MAX3421_MODE_HUBPRE_BIT,
268 MAX3421_MODE_SOFKAENAB_BIT,
269 MAX3421_MODE_SEPIRQ_BIT,
270 MAX3421_MODE_DELAYISO_BIT,
271 MAX3421_MODE_DMPULLDN_BIT,
272 MAX3421_MODE_DPPULLDN_BIT
273};
274
275enum {
276 MAX3421_HRSL_OK = 0,
277 MAX3421_HRSL_BUSY,
278 MAX3421_HRSL_BADREQ,
279 MAX3421_HRSL_UNDEF,
280 MAX3421_HRSL_NAK,
281 MAX3421_HRSL_STALL,
282 MAX3421_HRSL_TOGERR,
283 MAX3421_HRSL_WRONGPID,
284 MAX3421_HRSL_BADBC,
285 MAX3421_HRSL_PIDERR,
286 MAX3421_HRSL_PKTERR,
287 MAX3421_HRSL_CRCERR,
288 MAX3421_HRSL_KERR,
289 MAX3421_HRSL_JERR,
290 MAX3421_HRSL_TIMEOUT,
291 MAX3421_HRSL_BABBLE,
292 MAX3421_HRSL_RESULT_MASK = 0xf,
293 MAX3421_HRSL_RCVTOGRD_BIT = 4,
294 MAX3421_HRSL_SNDTOGRD_BIT,
295 MAX3421_HRSL_KSTATUS_BIT,
296 MAX3421_HRSL_JSTATUS_BIT
297};
298
299/* Return same error-codes as ohci.h:cc_to_error: */
300static const int hrsl_to_error[] = {
301 [MAX3421_HRSL_OK] = 0,
302 [MAX3421_HRSL_BUSY] = -EINVAL,
303 [MAX3421_HRSL_BADREQ] = -EINVAL,
304 [MAX3421_HRSL_UNDEF] = -EINVAL,
305 [MAX3421_HRSL_NAK] = -EAGAIN,
306 [MAX3421_HRSL_STALL] = -EPIPE,
307 [MAX3421_HRSL_TOGERR] = -EILSEQ,
308 [MAX3421_HRSL_WRONGPID] = -EPROTO,
309 [MAX3421_HRSL_BADBC] = -EREMOTEIO,
310 [MAX3421_HRSL_PIDERR] = -EPROTO,
311 [MAX3421_HRSL_PKTERR] = -EPROTO,
312 [MAX3421_HRSL_CRCERR] = -EILSEQ,
313 [MAX3421_HRSL_KERR] = -EIO,
314 [MAX3421_HRSL_JERR] = -EIO,
315 [MAX3421_HRSL_TIMEOUT] = -ETIME,
316 [MAX3421_HRSL_BABBLE] = -EOVERFLOW
317};
318
/*
 * See https://www.beyondlogic.org/usbnutshell/usb4.shtml#Control for a
 * reasonable overview of how control transfers use the IN/OUT
 * tokens.
 */
#define MAX3421_HXFR_BULK_IN(ep)	(0x00 | (ep))	/* bulk or interrupt */
#define MAX3421_HXFR_SETUP		 0x10
#define MAX3421_HXFR_BULK_OUT(ep)	(0x20 | (ep))	/* bulk or interrupt */
#define MAX3421_HXFR_ISO_IN(ep)		(0x40 | (ep))
#define MAX3421_HXFR_ISO_OUT(ep)	(0x60 | (ep))
#define MAX3421_HXFR_HS_IN		 0x80		/* handshake in */
#define MAX3421_HXFR_HS_OUT		 0xa0		/* handshake out */

#define field(val, bit)	((val) << (bit))

static inline s16
frame_diff(u16 left, u16 right)
{
	return ((unsigned) (left - right)) % (USB_MAX_FRAME_NUMBER + 1);
}
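
/*
 * Illustrative note: frame numbers wrap at USB_MAX_FRAME_NUMBER (0x7ff),
 * so frame_diff() remains meaningful across the wrap-around; e.g.,
 * frame_diff(0x000, 0x7ff) == 1 and frame_diff(0x7ff, 0x000) == 0x7ff.
 */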
339
340static inline struct max3421_hcd *
341hcd_to_max3421(struct usb_hcd *hcd)
342{
343 return (struct max3421_hcd *) hcd->hcd_priv;
344}
345
346static inline struct usb_hcd *
347max3421_to_hcd(struct max3421_hcd *max3421_hcd)
348{
349 return container_of((void *) max3421_hcd, struct usb_hcd, hcd_priv);
350}
351
352static u8
353spi_rd8(struct usb_hcd *hcd, unsigned int reg)
354{
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600355 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
David Mosberger2d531392014-04-28 22:14:07 -0600356 struct spi_device *spi = to_spi_device(hcd->self.controller);
357 struct spi_transfer transfer;
David Mosberger2d531392014-04-28 22:14:07 -0600358 struct spi_message msg;
359
360 memset(&transfer, 0, sizeof(transfer));
361
362 spi_message_init(&msg);
363
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600364 max3421_hcd->tx->data[0] =
365 (field(reg, MAX3421_SPI_REG_SHIFT) |
366 field(MAX3421_SPI_DIR_RD, MAX3421_SPI_DIR_SHIFT));
David Mosberger2d531392014-04-28 22:14:07 -0600367
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600368 transfer.tx_buf = max3421_hcd->tx->data;
369 transfer.rx_buf = max3421_hcd->rx->data;
David Mosberger2d531392014-04-28 22:14:07 -0600370 transfer.len = 2;
371
372 spi_message_add_tail(&transfer, &msg);
373 spi_sync(spi, &msg);
374
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600375 return max3421_hcd->rx->data[1];
David Mosberger2d531392014-04-28 22:14:07 -0600376}
377
378static void
379spi_wr8(struct usb_hcd *hcd, unsigned int reg, u8 val)
380{
381 struct spi_device *spi = to_spi_device(hcd->self.controller);
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600382 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
David Mosberger2d531392014-04-28 22:14:07 -0600383 struct spi_transfer transfer;
384 struct spi_message msg;
David Mosberger2d531392014-04-28 22:14:07 -0600385
386 memset(&transfer, 0, sizeof(transfer));
387
388 spi_message_init(&msg);
389
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600390 max3421_hcd->tx->data[0] =
391 (field(reg, MAX3421_SPI_REG_SHIFT) |
392 field(MAX3421_SPI_DIR_WR, MAX3421_SPI_DIR_SHIFT));
393 max3421_hcd->tx->data[1] = val;
David Mosberger2d531392014-04-28 22:14:07 -0600394
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600395 transfer.tx_buf = max3421_hcd->tx->data;
David Mosberger2d531392014-04-28 22:14:07 -0600396 transfer.len = 2;
397
398 spi_message_add_tail(&transfer, &msg);
399 spi_sync(spi, &msg);
400}
401
402static void
403spi_rd_buf(struct usb_hcd *hcd, unsigned int reg, void *buf, size_t len)
404{
405 struct spi_device *spi = to_spi_device(hcd->self.controller);
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600406 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
David Mosberger2d531392014-04-28 22:14:07 -0600407 struct spi_transfer transfer[2];
408 struct spi_message msg;
David Mosberger2d531392014-04-28 22:14:07 -0600409
410 memset(transfer, 0, sizeof(transfer));
411
412 spi_message_init(&msg);
413
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600414 max3421_hcd->tx->data[0] =
415 (field(reg, MAX3421_SPI_REG_SHIFT) |
416 field(MAX3421_SPI_DIR_RD, MAX3421_SPI_DIR_SHIFT));
417 transfer[0].tx_buf = max3421_hcd->tx->data;
David Mosberger2d531392014-04-28 22:14:07 -0600418 transfer[0].len = 1;
419
420 transfer[1].rx_buf = buf;
421 transfer[1].len = len;
422
423 spi_message_add_tail(&transfer[0], &msg);
424 spi_message_add_tail(&transfer[1], &msg);
425 spi_sync(spi, &msg);
426}
427
428static void
429spi_wr_buf(struct usb_hcd *hcd, unsigned int reg, void *buf, size_t len)
430{
431 struct spi_device *spi = to_spi_device(hcd->self.controller);
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600432 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
David Mosberger2d531392014-04-28 22:14:07 -0600433 struct spi_transfer transfer[2];
434 struct spi_message msg;
David Mosberger2d531392014-04-28 22:14:07 -0600435
436 memset(transfer, 0, sizeof(transfer));
437
438 spi_message_init(&msg);
439
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600440 max3421_hcd->tx->data[0] =
441 (field(reg, MAX3421_SPI_REG_SHIFT) |
442 field(MAX3421_SPI_DIR_WR, MAX3421_SPI_DIR_SHIFT));
David Mosberger2d531392014-04-28 22:14:07 -0600443
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600444 transfer[0].tx_buf = max3421_hcd->tx->data;
David Mosberger2d531392014-04-28 22:14:07 -0600445 transfer[0].len = 1;
446
447 transfer[1].tx_buf = buf;
448 transfer[1].len = len;
449
450 spi_message_add_tail(&transfer[0], &msg);
451 spi_message_add_tail(&transfer[1], &msg);
452 spi_sync(spi, &msg);
453}
454
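/*
 * Usage note (illustrative): spi_rd8()/spi_wr8() access a single MAX3421E
 * register, while spi_rd_buf()/spi_wr_buf() stream a whole FIFO in one SPI
 * message; e.g., max3421_ctrl_setup() below uses
 *	spi_wr_buf(hcd, MAX3421_REG_SUDFIFO, urb->setup_packet, 8);
 * to load an 8-byte SETUP packet into the setup-data FIFO.
 */
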
/*
 * Figure out the correct setting for the LOWSPEED and HUBPRE mode
 * bits.  The HUBPRE bit needs to be set when MAX3421E operates at
 * full speed, but it's talking to a low-speed device (i.e., through a
 * hub).  Setting that bit ensures that every low-speed packet is
 * preceded by a full-speed PRE PID.  Possible configurations:
 *
 *	Hub speed:	Device speed:	=>	LOWSPEED bit:	HUBPRE bit:
 *	FULL		FULL		=>	0		0
 *	FULL		LOW		=>	1		1
 *	LOW		LOW		=>	1		0
 *	LOW		FULL		=>	1		0
 */
468static void
469max3421_set_speed(struct usb_hcd *hcd, struct usb_device *dev)
470{
471 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
472 u8 mode_lowspeed, mode_hubpre, mode = max3421_hcd->mode;
473
474 mode_lowspeed = BIT(MAX3421_MODE_LOWSPEED_BIT);
475 mode_hubpre = BIT(MAX3421_MODE_HUBPRE_BIT);
476 if (max3421_hcd->port_status & USB_PORT_STAT_LOW_SPEED) {
477 mode |= mode_lowspeed;
478 mode &= ~mode_hubpre;
479 } else if (dev->speed == USB_SPEED_LOW) {
480 mode |= mode_lowspeed | mode_hubpre;
481 } else {
482 mode &= ~(mode_lowspeed | mode_hubpre);
483 }
484 if (mode != max3421_hcd->mode) {
485 max3421_hcd->mode = mode;
486 spi_wr8(hcd, MAX3421_REG_MODE, max3421_hcd->mode);
487 }
488
489}
490
491/*
492 * Caller must NOT hold HCD spinlock.
493 */
494static void
495max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum,
496 int force_toggles)
497{
498 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
499 int old_epnum, same_ep, rcvtog, sndtog;
500 struct usb_device *old_dev;
501 u8 hctl;
502
503 old_dev = max3421_hcd->loaded_dev;
504 old_epnum = max3421_hcd->loaded_epnum;
505
506 same_ep = (dev == old_dev && epnum == old_epnum);
507 if (same_ep && !force_toggles)
508 return;
509
510 if (old_dev && !same_ep) {
		/* save the old endpoint's toggles: */
512 u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
513
514 rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1;
515 sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
516
517 /* no locking: HCD (i.e., we) own toggles, don't we? */
518 usb_settoggle(old_dev, old_epnum, 0, rcvtog);
519 usb_settoggle(old_dev, old_epnum, 1, sndtog);
520 }
521 /* setup new endpoint's toggle bits: */
522 rcvtog = usb_gettoggle(dev, epnum, 0);
523 sndtog = usb_gettoggle(dev, epnum, 1);
524 hctl = (BIT(rcvtog + MAX3421_HCTL_RCVTOG0_BIT) |
525 BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT));
526
527 max3421_hcd->loaded_epnum = epnum;
528 spi_wr8(hcd, MAX3421_REG_HCTL, hctl);
529
530 /*
531 * Note: devnum for one and the same device can change during
532 * address-assignment so it's best to just always load the
533 * address whenever the end-point changed/was forced.
534 */
535 max3421_hcd->loaded_dev = dev;
536 spi_wr8(hcd, MAX3421_REG_PERADDR, dev->devnum);
537}
538
539static int
540max3421_ctrl_setup(struct usb_hcd *hcd, struct urb *urb)
541{
542 spi_wr_buf(hcd, MAX3421_REG_SUDFIFO, urb->setup_packet, 8);
543 return MAX3421_HXFR_SETUP;
544}
545
546static int
547max3421_transfer_in(struct usb_hcd *hcd, struct urb *urb)
548{
549 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
550 int epnum = usb_pipeendpoint(urb->pipe);
551
552 max3421_hcd->curr_len = 0;
553 max3421_hcd->hien |= BIT(MAX3421_HI_RCVDAV_BIT);
554 return MAX3421_HXFR_BULK_IN(epnum);
555}
556
557static int
558max3421_transfer_out(struct usb_hcd *hcd, struct urb *urb, int fast_retransmit)
559{
560 struct spi_device *spi = to_spi_device(hcd->self.controller);
561 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
562 int epnum = usb_pipeendpoint(urb->pipe);
563 u32 max_packet;
564 void *src;
565
566 src = urb->transfer_buffer + urb->actual_length;
567
568 if (fast_retransmit) {
569 if (max3421_hcd->rev == 0x12) {
570 /* work around rev 0x12 bug: */
571 spi_wr8(hcd, MAX3421_REG_SNDBC, 0);
572 spi_wr8(hcd, MAX3421_REG_SNDFIFO, ((u8 *) src)[0]);
573 spi_wr8(hcd, MAX3421_REG_SNDBC, max3421_hcd->curr_len);
574 }
575 return MAX3421_HXFR_BULK_OUT(epnum);
576 }
577
578 max_packet = usb_maxpacket(urb->dev, urb->pipe, 1);
579
580 if (max_packet > MAX3421_FIFO_SIZE) {
581 /*
582 * We do not support isochronous transfers at this
583 * time.
584 */
585 dev_err(&spi->dev,
586 "%s: packet-size of %u too big (limit is %u bytes)",
587 __func__, max_packet, MAX3421_FIFO_SIZE);
588 max3421_hcd->urb_done = -EMSGSIZE;
589 return -EMSGSIZE;
590 }
591 max3421_hcd->curr_len = min((urb->transfer_buffer_length -
592 urb->actual_length), max_packet);
593
594 spi_wr_buf(hcd, MAX3421_REG_SNDFIFO, src, max3421_hcd->curr_len);
595 spi_wr8(hcd, MAX3421_REG_SNDBC, max3421_hcd->curr_len);
596 return MAX3421_HXFR_BULK_OUT(epnum);
597}
598
599/*
600 * Issue the next host-transfer command.
601 * Caller must NOT hold HCD spinlock.
602 */
603static void
604max3421_next_transfer(struct usb_hcd *hcd, int fast_retransmit)
605{
606 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
607 struct urb *urb = max3421_hcd->curr_urb;
David Mosberger-Tangf9da25c2014-05-28 10:06:24 -0600608 struct max3421_ep *max3421_ep;
David Mosberger2d531392014-04-28 22:14:07 -0600609 int cmd = -EINVAL;
610
611 if (!urb)
612 return; /* nothing to do */
613
David Mosberger-Tangf9da25c2014-05-28 10:06:24 -0600614 max3421_ep = urb->ep->hcpriv;
615
David Mosberger2d531392014-04-28 22:14:07 -0600616 switch (max3421_ep->pkt_state) {
617 case PKT_STATE_SETUP:
618 cmd = max3421_ctrl_setup(hcd, urb);
619 break;
620
621 case PKT_STATE_TRANSFER:
622 if (usb_urb_dir_in(urb))
623 cmd = max3421_transfer_in(hcd, urb);
624 else
625 cmd = max3421_transfer_out(hcd, urb, fast_retransmit);
626 break;
627
628 case PKT_STATE_TERMINATE:
629 /*
630 * IN transfers are terminated with HS_OUT token,
631 * OUT transfers with HS_IN:
632 */
633 if (usb_urb_dir_in(urb))
634 cmd = MAX3421_HXFR_HS_OUT;
635 else
636 cmd = MAX3421_HXFR_HS_IN;
637 break;
638 }
639
640 if (cmd < 0)
641 return;
642
643 /* issue the command and wait for host-xfer-done interrupt: */
644
645 spi_wr8(hcd, MAX3421_REG_HXFR, cmd);
646 max3421_hcd->hien |= BIT(MAX3421_HI_HXFRDN_BIT);
647}
648
/*
 * Find the next URB to process and start its execution.
 *
 * At this time, we do not anticipate ever connecting a USB hub to the
 * MAX3421 chip, so at most one USB device can be connected and we can
 * use a simplistic scheduler: at the start of a frame, schedule all
 * periodic transfers.  Once that is done, use the remainder of the
 * frame to process non-periodic (bulk & control) transfers.
 *
 * Preconditions:
 * o Caller must NOT hold HCD spinlock.
 * o max3421_hcd->curr_urb MUST BE NULL.
 * o MAX3421E chip must be idle.
 */
663static int
664max3421_select_and_start_urb(struct usb_hcd *hcd)
665{
666 struct spi_device *spi = to_spi_device(hcd->self.controller);
667 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
668 struct urb *urb, *curr_urb = NULL;
669 struct max3421_ep *max3421_ep;
670 int epnum, force_toggles = 0;
671 struct usb_host_endpoint *ep;
672 struct list_head *pos;
673 unsigned long flags;
674
675 spin_lock_irqsave(&max3421_hcd->lock, flags);
676
677 for (;
678 max3421_hcd->sched_pass < SCHED_PASS_DONE;
679 ++max3421_hcd->sched_pass)
680 list_for_each(pos, &max3421_hcd->ep_list) {
681 urb = NULL;
682 max3421_ep = container_of(pos, struct max3421_ep,
683 ep_list);
684 ep = max3421_ep->ep;
685
686 switch (usb_endpoint_type(&ep->desc)) {
687 case USB_ENDPOINT_XFER_ISOC:
688 case USB_ENDPOINT_XFER_INT:
689 if (max3421_hcd->sched_pass !=
690 SCHED_PASS_PERIODIC)
691 continue;
692 break;
693
694 case USB_ENDPOINT_XFER_CONTROL:
695 case USB_ENDPOINT_XFER_BULK:
696 if (max3421_hcd->sched_pass !=
697 SCHED_PASS_NON_PERIODIC)
698 continue;
699 break;
700 }
701
702 if (list_empty(&ep->urb_list))
703 continue; /* nothing to do */
704 urb = list_first_entry(&ep->urb_list, struct urb,
705 urb_list);
706 if (urb->unlinked) {
707 dev_dbg(&spi->dev, "%s: URB %p unlinked=%d",
708 __func__, urb, urb->unlinked);
709 max3421_hcd->curr_urb = urb;
710 max3421_hcd->urb_done = 1;
711 spin_unlock_irqrestore(&max3421_hcd->lock,
712 flags);
713 return 1;
714 }
715
716 switch (usb_endpoint_type(&ep->desc)) {
717 case USB_ENDPOINT_XFER_CONTROL:
718 /*
719 * Allow one control transaction per
720 * frame per endpoint:
721 */
722 if (frame_diff(max3421_ep->last_active,
723 max3421_hcd->frame_number) == 0)
724 continue;
725 break;
726
727 case USB_ENDPOINT_XFER_BULK:
728 if (max3421_ep->retransmit
729 && (frame_diff(max3421_ep->last_active,
730 max3421_hcd->frame_number)
731 == 0))
732 /*
733 * We already tried this EP
734 * during this frame and got a
735 * NAK or error; wait for next frame
736 */
737 continue;
738 break;
739
740 case USB_ENDPOINT_XFER_ISOC:
741 case USB_ENDPOINT_XFER_INT:
742 if (frame_diff(max3421_hcd->frame_number,
743 max3421_ep->last_active)
744 < urb->interval)
745 /*
746 * We already processed this
747 * end-point in the current
748 * frame
749 */
750 continue;
751 break;
752 }
753
754 /* move current ep to tail: */
755 list_move_tail(pos, &max3421_hcd->ep_list);
756 curr_urb = urb;
757 goto done;
758 }
759done:
760 if (!curr_urb) {
761 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
762 return 0;
763 }
764
765 urb = max3421_hcd->curr_urb = curr_urb;
766 epnum = usb_endpoint_num(&urb->ep->desc);
767 if (max3421_ep->retransmit)
768 /* restart (part of) a USB transaction: */
769 max3421_ep->retransmit = 0;
770 else {
771 /* start USB transaction: */
772 if (usb_endpoint_xfer_control(&ep->desc)) {
773 /*
774 * See USB 2.0 spec section 8.6.1
775 * Initialization via SETUP Token:
776 */
777 usb_settoggle(urb->dev, epnum, 0, 1);
778 usb_settoggle(urb->dev, epnum, 1, 1);
779 max3421_ep->pkt_state = PKT_STATE_SETUP;
780 force_toggles = 1;
781 } else
782 max3421_ep->pkt_state = PKT_STATE_TRANSFER;
783 }
784
785 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
786
787 max3421_ep->last_active = max3421_hcd->frame_number;
788 max3421_set_address(hcd, urb->dev, epnum, force_toggles);
789 max3421_set_speed(hcd, urb->dev);
790 max3421_next_transfer(hcd, 0);
791 return 1;
792}
793
794/*
795 * Check all endpoints for URBs that got unlinked.
796 *
797 * Caller must NOT hold HCD spinlock.
798 */
799static int
800max3421_check_unlink(struct usb_hcd *hcd)
801{
802 struct spi_device *spi = to_spi_device(hcd->self.controller);
803 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
David Mosberger2d531392014-04-28 22:14:07 -0600804 struct max3421_ep *max3421_ep;
805 struct usb_host_endpoint *ep;
Geliang Tang553c2362015-12-20 00:11:50 +0800806 struct urb *urb, *next;
David Mosberger2d531392014-04-28 22:14:07 -0600807 unsigned long flags;
808 int retval = 0;
809
810 spin_lock_irqsave(&max3421_hcd->lock, flags);
Geliang Tang553c2362015-12-20 00:11:50 +0800811 list_for_each_entry(max3421_ep, &max3421_hcd->ep_list, ep_list) {
David Mosberger2d531392014-04-28 22:14:07 -0600812 ep = max3421_ep->ep;
Geliang Tang553c2362015-12-20 00:11:50 +0800813 list_for_each_entry_safe(urb, next, &ep->urb_list, urb_list) {
David Mosberger2d531392014-04-28 22:14:07 -0600814 if (urb->unlinked) {
815 retval = 1;
816 dev_dbg(&spi->dev, "%s: URB %p unlinked=%d",
817 __func__, urb, urb->unlinked);
818 usb_hcd_unlink_urb_from_ep(hcd, urb);
819 spin_unlock_irqrestore(&max3421_hcd->lock,
820 flags);
821 usb_hcd_giveback_urb(hcd, urb, 0);
822 spin_lock_irqsave(&max3421_hcd->lock, flags);
823 }
824 }
825 }
826 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
827 return retval;
828}
829
830/*
831 * Caller must NOT hold HCD spinlock.
832 */
833static void
834max3421_slow_retransmit(struct usb_hcd *hcd)
835{
836 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
837 struct urb *urb = max3421_hcd->curr_urb;
838 struct max3421_ep *max3421_ep;
839
840 max3421_ep = urb->ep->hcpriv;
841 max3421_ep->retransmit = 1;
842 max3421_hcd->curr_urb = NULL;
843}
844
845/*
846 * Caller must NOT hold HCD spinlock.
847 */
848static void
849max3421_recv_data_available(struct usb_hcd *hcd)
850{
851 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
852 struct urb *urb = max3421_hcd->curr_urb;
853 size_t remaining, transfer_size;
854 u8 rcvbc;
855
856 rcvbc = spi_rd8(hcd, MAX3421_REG_RCVBC);
857
858 if (rcvbc > MAX3421_FIFO_SIZE)
859 rcvbc = MAX3421_FIFO_SIZE;
860 if (urb->actual_length >= urb->transfer_buffer_length)
861 remaining = 0;
862 else
863 remaining = urb->transfer_buffer_length - urb->actual_length;
864 transfer_size = rcvbc;
865 if (transfer_size > remaining)
866 transfer_size = remaining;
867 if (transfer_size > 0) {
868 void *dst = urb->transfer_buffer + urb->actual_length;
869
870 spi_rd_buf(hcd, MAX3421_REG_RCVFIFO, dst, transfer_size);
871 urb->actual_length += transfer_size;
872 max3421_hcd->curr_len = transfer_size;
873 }
874
875 /* ack the RCVDAV irq now that the FIFO has been read: */
876 spi_wr8(hcd, MAX3421_REG_HIRQ, BIT(MAX3421_HI_RCVDAV_BIT));
877}
878
879static void
880max3421_handle_error(struct usb_hcd *hcd, u8 hrsl)
881{
882 struct spi_device *spi = to_spi_device(hcd->self.controller);
883 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
884 u8 result_code = hrsl & MAX3421_HRSL_RESULT_MASK;
885 struct urb *urb = max3421_hcd->curr_urb;
886 struct max3421_ep *max3421_ep = urb->ep->hcpriv;
887 int switch_sndfifo;
888
889 /*
890 * If an OUT command results in any response other than OK
891 * (i.e., error or NAK), we have to perform a dummy-write to
892 * SNDBC so the FIFO gets switched back to us. Otherwise, we
893 * get out of sync with the SNDFIFO double buffer.
894 */
895 switch_sndfifo = (max3421_ep->pkt_state == PKT_STATE_TRANSFER &&
896 usb_urb_dir_out(urb));
897
898 switch (result_code) {
899 case MAX3421_HRSL_OK:
900 return; /* this shouldn't happen */
901
902 case MAX3421_HRSL_WRONGPID: /* received wrong PID */
903 case MAX3421_HRSL_BUSY: /* SIE busy */
904 case MAX3421_HRSL_BADREQ: /* bad val in HXFR */
905 case MAX3421_HRSL_UNDEF: /* reserved */
906 case MAX3421_HRSL_KERR: /* K-state instead of response */
907 case MAX3421_HRSL_JERR: /* J-state instead of response */
908 /*
909 * packet experienced an error that we cannot recover
910 * from; report error
911 */
912 max3421_hcd->urb_done = hrsl_to_error[result_code];
913 dev_dbg(&spi->dev, "%s: unexpected error HRSL=0x%02x",
914 __func__, hrsl);
915 break;
916
917 case MAX3421_HRSL_TOGERR:
918 if (usb_urb_dir_in(urb))
919 ; /* don't do anything (device will switch toggle) */
920 else {
921 /* flip the send toggle bit: */
922 int sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
923
924 sndtog ^= 1;
925 spi_wr8(hcd, MAX3421_REG_HCTL,
926 BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT));
927 }
Gustavo A. R. Silva0d9b6d42020-07-07 14:56:07 -0500928 fallthrough;
David Mosberger2d531392014-04-28 22:14:07 -0600929 case MAX3421_HRSL_BADBC: /* bad byte count */
930 case MAX3421_HRSL_PIDERR: /* received PID is corrupted */
931 case MAX3421_HRSL_PKTERR: /* packet error (stuff, EOP) */
932 case MAX3421_HRSL_CRCERR: /* CRC error */
933 case MAX3421_HRSL_BABBLE: /* device talked too long */
934 case MAX3421_HRSL_TIMEOUT:
935 if (max3421_ep->retries++ < USB_MAX_RETRIES)
936 /* retry the packet again in the next frame */
937 max3421_slow_retransmit(hcd);
938 else {
939 /* Based on ohci.h cc_to_err[]: */
940 max3421_hcd->urb_done = hrsl_to_error[result_code];
941 dev_dbg(&spi->dev, "%s: unexpected error HRSL=0x%02x",
942 __func__, hrsl);
943 }
944 break;
945
946 case MAX3421_HRSL_STALL:
947 dev_dbg(&spi->dev, "%s: unexpected error HRSL=0x%02x",
948 __func__, hrsl);
949 max3421_hcd->urb_done = hrsl_to_error[result_code];
950 break;
951
952 case MAX3421_HRSL_NAK:
953 /*
954 * Device wasn't ready for data or has no data
955 * available: retry the packet again.
956 */
957 if (max3421_ep->naks++ < NAK_MAX_FAST_RETRANSMITS) {
958 max3421_next_transfer(hcd, 1);
959 switch_sndfifo = 0;
960 } else
961 max3421_slow_retransmit(hcd);
962 break;
963 }
964 if (switch_sndfifo)
965 spi_wr8(hcd, MAX3421_REG_SNDBC, 0);
966}
967
968/*
969 * Caller must NOT hold HCD spinlock.
970 */
971static int
972max3421_transfer_in_done(struct usb_hcd *hcd, struct urb *urb)
973{
974 struct spi_device *spi = to_spi_device(hcd->self.controller);
975 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
976 u32 max_packet;
977
978 if (urb->actual_length >= urb->transfer_buffer_length)
979 return 1; /* read is complete, so we're done */
980
981 /*
982 * USB 2.0 Section 5.3.2 Pipes: packets must be full size
983 * except for last one.
984 */
985 max_packet = usb_maxpacket(urb->dev, urb->pipe, 0);
986 if (max_packet > MAX3421_FIFO_SIZE) {
987 /*
988 * We do not support isochronous transfers at this
989 * time...
990 */
991 dev_err(&spi->dev,
992 "%s: packet-size of %u too big (limit is %u bytes)",
993 __func__, max_packet, MAX3421_FIFO_SIZE);
994 return -EINVAL;
995 }
996
997 if (max3421_hcd->curr_len < max_packet) {
998 if (urb->transfer_flags & URB_SHORT_NOT_OK) {
999 /*
1000 * remaining > 0 and received an
1001 * unexpected partial packet ->
1002 * error
1003 */
1004 return -EREMOTEIO;
1005 } else
1006 /* short read, but it's OK */
1007 return 1;
1008 }
1009 return 0; /* not done */
1010}
1011
1012/*
1013 * Caller must NOT hold HCD spinlock.
1014 */
1015static int
1016max3421_transfer_out_done(struct usb_hcd *hcd, struct urb *urb)
1017{
1018 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1019
1020 urb->actual_length += max3421_hcd->curr_len;
1021 if (urb->actual_length < urb->transfer_buffer_length)
1022 return 0;
1023 if (urb->transfer_flags & URB_ZERO_PACKET) {
		/*
		 * Some hardware needs a zero-size packet at the end
		 * of a bulk-out transfer if the last transfer was a
		 * full-sized packet (i.e., such hardware uses a packet
		 * shorter than max_packet as the indicator that the
		 * end of the transfer has been reached).
		 */
1031 u32 max_packet = usb_maxpacket(urb->dev, urb->pipe, 1);
1032
1033 if (max3421_hcd->curr_len == max_packet)
1034 return 0;
1035 }
1036 return 1;
1037}
1038
1039/*
1040 * Caller must NOT hold HCD spinlock.
1041 */
1042static void
1043max3421_host_transfer_done(struct usb_hcd *hcd)
1044{
1045 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1046 struct urb *urb = max3421_hcd->curr_urb;
1047 struct max3421_ep *max3421_ep;
1048 u8 result_code, hrsl;
1049 int urb_done = 0;
1050
1051 max3421_hcd->hien &= ~(BIT(MAX3421_HI_HXFRDN_BIT) |
1052 BIT(MAX3421_HI_RCVDAV_BIT));
1053
1054 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
1055 result_code = hrsl & MAX3421_HRSL_RESULT_MASK;
1056
1057#ifdef DEBUG
1058 ++max3421_hcd->err_stat[result_code];
1059#endif
1060
1061 max3421_ep = urb->ep->hcpriv;
1062
1063 if (unlikely(result_code != MAX3421_HRSL_OK)) {
1064 max3421_handle_error(hcd, hrsl);
1065 return;
1066 }
1067
1068 max3421_ep->naks = 0;
1069 max3421_ep->retries = 0;
1070 switch (max3421_ep->pkt_state) {
1071
1072 case PKT_STATE_SETUP:
1073 if (urb->transfer_buffer_length > 0)
1074 max3421_ep->pkt_state = PKT_STATE_TRANSFER;
1075 else
1076 max3421_ep->pkt_state = PKT_STATE_TERMINATE;
1077 break;
1078
1079 case PKT_STATE_TRANSFER:
1080 if (usb_urb_dir_in(urb))
1081 urb_done = max3421_transfer_in_done(hcd, urb);
1082 else
1083 urb_done = max3421_transfer_out_done(hcd, urb);
1084 if (urb_done > 0 && usb_pipetype(urb->pipe) == PIPE_CONTROL) {
1085 /*
1086 * We aren't really done - we still need to
1087 * terminate the control transfer:
1088 */
1089 max3421_hcd->urb_done = urb_done = 0;
1090 max3421_ep->pkt_state = PKT_STATE_TERMINATE;
1091 }
1092 break;
1093
1094 case PKT_STATE_TERMINATE:
1095 urb_done = 1;
1096 break;
1097 }
1098
1099 if (urb_done)
1100 max3421_hcd->urb_done = urb_done;
1101 else
1102 max3421_next_transfer(hcd, 0);
1103}
1104
1105/*
1106 * Caller must NOT hold HCD spinlock.
1107 */
1108static void
1109max3421_detect_conn(struct usb_hcd *hcd)
1110{
1111 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1112 unsigned int jk, have_conn = 0;
1113 u32 old_port_status, chg;
1114 unsigned long flags;
1115 u8 hrsl, mode;
1116
1117 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
1118
1119 jk = ((((hrsl >> MAX3421_HRSL_JSTATUS_BIT) & 1) << 0) |
1120 (((hrsl >> MAX3421_HRSL_KSTATUS_BIT) & 1) << 1));
1121
1122 mode = max3421_hcd->mode;
1123
1124 switch (jk) {
1125 case 0x0: /* SE0: disconnect */
		/*
		 * Turn off the SOFKAENAB bit to avoid getting an
		 * interrupt every millisecond:
		 */
1130 mode &= ~BIT(MAX3421_MODE_SOFKAENAB_BIT);
1131 break;
1132
1133 case 0x1: /* J=0,K=1: low-speed (in full-speed or vice versa) */
1134 case 0x2: /* J=1,K=0: full-speed (in full-speed or vice versa) */
1135 if (jk == 0x2)
1136 /* need to switch to the other speed: */
1137 mode ^= BIT(MAX3421_MODE_LOWSPEED_BIT);
1138 /* turn on SOFKAENAB bit: */
1139 mode |= BIT(MAX3421_MODE_SOFKAENAB_BIT);
1140 have_conn = 1;
1141 break;
1142
1143 case 0x3: /* illegal */
1144 break;
1145 }
1146
1147 max3421_hcd->mode = mode;
1148 spi_wr8(hcd, MAX3421_REG_MODE, max3421_hcd->mode);
1149
1150 spin_lock_irqsave(&max3421_hcd->lock, flags);
1151 old_port_status = max3421_hcd->port_status;
1152 if (have_conn)
1153 max3421_hcd->port_status |= USB_PORT_STAT_CONNECTION;
1154 else
1155 max3421_hcd->port_status &= ~USB_PORT_STAT_CONNECTION;
1156 if (mode & BIT(MAX3421_MODE_LOWSPEED_BIT))
1157 max3421_hcd->port_status |= USB_PORT_STAT_LOW_SPEED;
1158 else
1159 max3421_hcd->port_status &= ~USB_PORT_STAT_LOW_SPEED;
1160 chg = (old_port_status ^ max3421_hcd->port_status);
1161 max3421_hcd->port_status |= chg << 16;
1162 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
1163}
1164
static irqreturn_t
max3421_irq_handler(int irq, void *dev_id)
{
	struct usb_hcd *hcd = dev_id;
	struct spi_device *spi = to_spi_device(hcd->self.controller);
	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);

	if (max3421_hcd->spi_thread)
		wake_up_process(max3421_hcd->spi_thread);
	if (!test_and_set_bit(ENABLE_IRQ, &max3421_hcd->todo))
		disable_irq_nosync(spi->irq);
	return IRQ_HANDLED;
}
1178
1179#ifdef DEBUG
1180
1181static void
1182dump_eps(struct usb_hcd *hcd)
1183{
1184 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1185 struct max3421_ep *max3421_ep;
1186 struct usb_host_endpoint *ep;
David Mosberger2d531392014-04-28 22:14:07 -06001187 char ubuf[512], *dp, *end;
1188 unsigned long flags;
1189 struct urb *urb;
1190 int epnum, ret;
1191
1192 spin_lock_irqsave(&max3421_hcd->lock, flags);
Geliang Tang553c2362015-12-20 00:11:50 +08001193 list_for_each_entry(max3421_ep, &max3421_hcd->ep_list, ep_list) {
David Mosberger2d531392014-04-28 22:14:07 -06001194 ep = max3421_ep->ep;
1195
1196 dp = ubuf;
1197 end = dp + sizeof(ubuf);
1198 *dp = '\0';
Geliang Tang553c2362015-12-20 00:11:50 +08001199 list_for_each_entry(urb, &ep->urb_list, urb_list) {
David Mosberger2d531392014-04-28 22:14:07 -06001200 ret = snprintf(dp, end - dp, " %p(%d.%s %d/%d)", urb,
1201 usb_pipetype(urb->pipe),
1202 usb_urb_dir_in(urb) ? "IN" : "OUT",
1203 urb->actual_length,
1204 urb->transfer_buffer_length);
1205 if (ret < 0 || ret >= end - dp)
1206 break; /* error or buffer full */
1207 dp += ret;
1208 }
1209
1210 epnum = usb_endpoint_num(&ep->desc);
1211 pr_info("EP%0u %u lst %04u rtr %u nak %6u rxmt %u: %s\n",
1212 epnum, max3421_ep->pkt_state, max3421_ep->last_active,
1213 max3421_ep->retries, max3421_ep->naks,
1214 max3421_ep->retransmit, ubuf);
1215 }
1216 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
1217}
1218
1219#endif /* DEBUG */
1220
1221/* Return zero if no work was performed, 1 otherwise. */
1222static int
1223max3421_handle_irqs(struct usb_hcd *hcd)
1224{
1225 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1226 u32 chg, old_port_status;
1227 unsigned long flags;
1228 u8 hirq;
1229
1230 /*
1231 * Read and ack pending interrupts (CPU must never
1232 * clear SNDBAV directly and RCVDAV must be cleared by
1233 * max3421_recv_data_available()!):
1234 */
1235 hirq = spi_rd8(hcd, MAX3421_REG_HIRQ);
1236 hirq &= max3421_hcd->hien;
1237 if (!hirq)
1238 return 0;
1239
1240 spi_wr8(hcd, MAX3421_REG_HIRQ,
1241 hirq & ~(BIT(MAX3421_HI_SNDBAV_BIT) |
1242 BIT(MAX3421_HI_RCVDAV_BIT)));
1243
1244 if (hirq & BIT(MAX3421_HI_FRAME_BIT)) {
1245 max3421_hcd->frame_number = ((max3421_hcd->frame_number + 1)
1246 & USB_MAX_FRAME_NUMBER);
1247 max3421_hcd->sched_pass = SCHED_PASS_PERIODIC;
1248 }
1249
1250 if (hirq & BIT(MAX3421_HI_RCVDAV_BIT))
1251 max3421_recv_data_available(hcd);
1252
1253 if (hirq & BIT(MAX3421_HI_HXFRDN_BIT))
1254 max3421_host_transfer_done(hcd);
1255
1256 if (hirq & BIT(MAX3421_HI_CONDET_BIT))
1257 max3421_detect_conn(hcd);
1258
1259 /*
1260 * Now process interrupts that may affect HCD state
1261 * other than the end-points:
1262 */
1263 spin_lock_irqsave(&max3421_hcd->lock, flags);
1264
1265 old_port_status = max3421_hcd->port_status;
1266 if (hirq & BIT(MAX3421_HI_BUSEVENT_BIT)) {
1267 if (max3421_hcd->port_status & USB_PORT_STAT_RESET) {
1268 /* BUSEVENT due to completion of Bus Reset */
1269 max3421_hcd->port_status &= ~USB_PORT_STAT_RESET;
1270 max3421_hcd->port_status |= USB_PORT_STAT_ENABLE;
1271 } else {
1272 /* BUSEVENT due to completion of Bus Resume */
1273 pr_info("%s: BUSEVENT Bus Resume Done\n", __func__);
1274 }
1275 }
1276 if (hirq & BIT(MAX3421_HI_RWU_BIT))
1277 pr_info("%s: RWU\n", __func__);
1278 if (hirq & BIT(MAX3421_HI_SUSDN_BIT))
1279 pr_info("%s: SUSDN\n", __func__);
1280
1281 chg = (old_port_status ^ max3421_hcd->port_status);
1282 max3421_hcd->port_status |= chg << 16;
1283
1284 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
1285
1286#ifdef DEBUG
1287 {
1288 static unsigned long last_time;
1289 char sbuf[16 * 16], *dp, *end;
1290 int i;
1291
Asaf Vertz788bfe82014-12-15 09:22:07 +02001292 if (time_after(jiffies, last_time + 5*HZ)) {
David Mosberger2d531392014-04-28 22:14:07 -06001293 dp = sbuf;
1294 end = sbuf + sizeof(sbuf);
1295 *dp = '\0';
1296 for (i = 0; i < 16; ++i) {
1297 int ret = snprintf(dp, end - dp, " %lu",
1298 max3421_hcd->err_stat[i]);
1299 if (ret < 0 || ret >= end - dp)
1300 break; /* error or buffer full */
1301 dp += ret;
1302 }
1303 pr_info("%s: hrsl_stats %s\n", __func__, sbuf);
1304 memset(max3421_hcd->err_stat, 0,
1305 sizeof(max3421_hcd->err_stat));
1306 last_time = jiffies;
1307
1308 dump_eps(hcd);
1309 }
1310 }
1311#endif
1312 return 1;
1313}
1314
1315static int
1316max3421_reset_hcd(struct usb_hcd *hcd)
1317{
1318 struct spi_device *spi = to_spi_device(hcd->self.controller);
1319 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1320 int timeout;
1321
1322 /* perform a chip reset and wait for OSCIRQ signal to appear: */
1323 spi_wr8(hcd, MAX3421_REG_USBCTL, BIT(MAX3421_USBCTL_CHIPRES_BIT));
1324 /* clear reset: */
1325 spi_wr8(hcd, MAX3421_REG_USBCTL, 0);
1326 timeout = 1000;
1327 while (1) {
1328 if (spi_rd8(hcd, MAX3421_REG_USBIRQ)
1329 & BIT(MAX3421_USBIRQ_OSCOKIRQ_BIT))
1330 break;
1331 if (--timeout < 0) {
1332 dev_err(&spi->dev,
1333 "timed out waiting for oscillator OK signal");
1334 return 1;
1335 }
1336 cond_resched();
1337 }
1338
1339 /*
1340 * Turn on host mode, automatic generation of SOF packets, and
1341 * enable pull-down registers on DM/DP:
1342 */
1343 max3421_hcd->mode = (BIT(MAX3421_MODE_HOST_BIT) |
1344 BIT(MAX3421_MODE_SOFKAENAB_BIT) |
1345 BIT(MAX3421_MODE_DMPULLDN_BIT) |
1346 BIT(MAX3421_MODE_DPPULLDN_BIT));
1347 spi_wr8(hcd, MAX3421_REG_MODE, max3421_hcd->mode);
1348
1349 /* reset frame-number: */
1350 max3421_hcd->frame_number = USB_MAX_FRAME_NUMBER;
1351 spi_wr8(hcd, MAX3421_REG_HCTL, BIT(MAX3421_HCTL_FRMRST_BIT));
1352
1353 /* sample the state of the D+ and D- lines */
1354 spi_wr8(hcd, MAX3421_REG_HCTL, BIT(MAX3421_HCTL_SAMPLEBUS_BIT));
1355 max3421_detect_conn(hcd);
1356
1357 /* enable frame, connection-detected, and bus-event interrupts: */
1358 max3421_hcd->hien = (BIT(MAX3421_HI_FRAME_BIT) |
1359 BIT(MAX3421_HI_CONDET_BIT) |
1360 BIT(MAX3421_HI_BUSEVENT_BIT));
1361 spi_wr8(hcd, MAX3421_REG_HIEN, max3421_hcd->hien);
1362
1363 /* enable interrupts: */
1364 spi_wr8(hcd, MAX3421_REG_CPUCTL, BIT(MAX3421_CPUCTL_IE_BIT));
1365 return 1;
1366}
1367
1368static int
1369max3421_urb_done(struct usb_hcd *hcd)
1370{
1371 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1372 unsigned long flags;
1373 struct urb *urb;
1374 int status;
1375
1376 status = max3421_hcd->urb_done;
1377 max3421_hcd->urb_done = 0;
1378 if (status > 0)
1379 status = 0;
1380 urb = max3421_hcd->curr_urb;
1381 if (urb) {
1382 max3421_hcd->curr_urb = NULL;
1383 spin_lock_irqsave(&max3421_hcd->lock, flags);
1384 usb_hcd_unlink_urb_from_ep(hcd, urb);
1385 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
1386
1387 /* must be called without the HCD spinlock: */
1388 usb_hcd_giveback_urb(hcd, urb, status);
1389 }
1390 return 1;
1391}
1392
1393static int
1394max3421_spi_thread(void *dev_id)
1395{
1396 struct usb_hcd *hcd = dev_id;
1397 struct spi_device *spi = to_spi_device(hcd->self.controller);
1398 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1399 int i, i_worked = 1;
1400
1401 /* set full-duplex SPI mode, low-active interrupt pin: */
1402 spi_wr8(hcd, MAX3421_REG_PINCTL,
1403 (BIT(MAX3421_PINCTL_FDUPSPI_BIT) | /* full-duplex */
1404 BIT(MAX3421_PINCTL_INTLEVEL_BIT))); /* low-active irq */
1405
1406 while (!kthread_should_stop()) {
1407 max3421_hcd->rev = spi_rd8(hcd, MAX3421_REG_REVISION);
1408 if (max3421_hcd->rev == 0x12 || max3421_hcd->rev == 0x13)
1409 break;
1410 dev_err(&spi->dev, "bad rev 0x%02x", max3421_hcd->rev);
1411 msleep(10000);
1412 }
1413 dev_info(&spi->dev, "rev 0x%x, SPI clk %dHz, bpw %u, irq %d\n",
1414 max3421_hcd->rev, spi->max_speed_hz, spi->bits_per_word,
1415 spi->irq);
1416
1417 while (!kthread_should_stop()) {
1418 if (!i_worked) {
1419 /*
1420 * We'll be waiting for wakeups from the hard
1421 * interrupt handler, so now is a good time to
1422 * sync our hien with the chip:
1423 */
1424 spi_wr8(hcd, MAX3421_REG_HIEN, max3421_hcd->hien);
1425
1426 set_current_state(TASK_INTERRUPTIBLE);
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -06001427 if (test_and_clear_bit(ENABLE_IRQ, &max3421_hcd->todo))
David Mosberger2d531392014-04-28 22:14:07 -06001428 enable_irq(spi->irq);
David Mosberger2d531392014-04-28 22:14:07 -06001429 schedule();
1430 __set_current_state(TASK_RUNNING);
1431 }
1432
1433 i_worked = 0;
1434
1435 if (max3421_hcd->urb_done)
1436 i_worked |= max3421_urb_done(hcd);
1437 else if (max3421_handle_irqs(hcd))
1438 i_worked = 1;
1439 else if (!max3421_hcd->curr_urb)
1440 i_worked |= max3421_select_and_start_urb(hcd);
1441
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -06001442 if (test_and_clear_bit(RESET_HCD, &max3421_hcd->todo))
David Mosberger2d531392014-04-28 22:14:07 -06001443 /* reset the HCD: */
David Mosberger2d531392014-04-28 22:14:07 -06001444 i_worked |= max3421_reset_hcd(hcd);
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -06001445 if (test_and_clear_bit(RESET_PORT, &max3421_hcd->todo)) {
David Mosberger2d531392014-04-28 22:14:07 -06001446 /* perform a USB bus reset: */
David Mosberger2d531392014-04-28 22:14:07 -06001447 spi_wr8(hcd, MAX3421_REG_HCTL,
1448 BIT(MAX3421_HCTL_BUSRST_BIT));
1449 i_worked = 1;
1450 }
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -06001451 if (test_and_clear_bit(CHECK_UNLINK, &max3421_hcd->todo))
David Mosberger2d531392014-04-28 22:14:07 -06001452 i_worked |= max3421_check_unlink(hcd);
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -06001453 if (test_and_clear_bit(IOPIN_UPDATE, &max3421_hcd->todo)) {
David Mosberger2d531392014-04-28 22:14:07 -06001454 /*
1455 * IOPINS1/IOPINS2 do not auto-increment, so we can't
1456 * use spi_wr_buf().
1457 */
1458 for (i = 0; i < ARRAY_SIZE(max3421_hcd->iopins); ++i) {
1459 u8 val = spi_rd8(hcd, MAX3421_REG_IOPINS1);
1460
1461 val = ((val & 0xf0) |
1462 (max3421_hcd->iopins[i] & 0x0f));
1463 spi_wr8(hcd, MAX3421_REG_IOPINS1 + i, val);
1464 max3421_hcd->iopins[i] = val;
1465 }
David Mosberger2d531392014-04-28 22:14:07 -06001466 i_worked = 1;
1467 }
1468 }
1469 set_current_state(TASK_RUNNING);
1470 dev_info(&spi->dev, "SPI thread exiting");
1471 return 0;
1472}
1473
1474static int
1475max3421_reset_port(struct usb_hcd *hcd)
1476{
1477 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1478
1479 max3421_hcd->port_status &= ~(USB_PORT_STAT_ENABLE |
1480 USB_PORT_STAT_LOW_SPEED);
David Mosberger-Tanga2b63cb2014-06-19 12:57:28 -06001481 max3421_hcd->port_status |= USB_PORT_STAT_RESET;
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -06001482 set_bit(RESET_PORT, &max3421_hcd->todo);
David Mosberger2d531392014-04-28 22:14:07 -06001483 wake_up_process(max3421_hcd->spi_thread);
1484 return 0;
1485}
1486
1487static int
1488max3421_reset(struct usb_hcd *hcd)
1489{
1490 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1491
1492 hcd->self.sg_tablesize = 0;
1493 hcd->speed = HCD_USB2;
1494 hcd->self.root_hub->speed = USB_SPEED_FULL;
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -06001495 set_bit(RESET_HCD, &max3421_hcd->todo);
David Mosberger2d531392014-04-28 22:14:07 -06001496 wake_up_process(max3421_hcd->spi_thread);
1497 return 0;
1498}
1499
1500static int
1501max3421_start(struct usb_hcd *hcd)
1502{
1503 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1504
1505 spin_lock_init(&max3421_hcd->lock);
1506 max3421_hcd->rh_state = MAX3421_RH_RUNNING;
1507
1508 INIT_LIST_HEAD(&max3421_hcd->ep_list);
1509
1510 hcd->power_budget = POWER_BUDGET;
1511 hcd->state = HC_STATE_RUNNING;
1512 hcd->uses_new_polling = 1;
1513 return 0;
1514}
1515
1516static void
1517max3421_stop(struct usb_hcd *hcd)
1518{
1519}
1520
1521static int
1522max3421_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1523{
1524 struct spi_device *spi = to_spi_device(hcd->self.controller);
1525 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1526 struct max3421_ep *max3421_ep;
1527 unsigned long flags;
1528 int retval;
1529
1530 switch (usb_pipetype(urb->pipe)) {
1531 case PIPE_INTERRUPT:
1532 case PIPE_ISOCHRONOUS:
1533 if (urb->interval < 0) {
1534 dev_err(&spi->dev,
1535 "%s: interval=%d for intr-/iso-pipe; expected > 0\n",
1536 __func__, urb->interval);
1537 return -EINVAL;
1538 }
Gustavo A. R. Silva93c747e2020-11-20 12:40:27 -06001539 break;
David Mosberger2d531392014-04-28 22:14:07 -06001540 default:
1541 break;
1542 }
1543
1544 spin_lock_irqsave(&max3421_hcd->lock, flags);
1545
1546 max3421_ep = urb->ep->hcpriv;
1547 if (!max3421_ep) {
1548 /* gets freed in max3421_endpoint_disable: */
Alexey Khoroshilov6c0f3692014-06-19 23:44:57 +04001549 max3421_ep = kzalloc(sizeof(struct max3421_ep), GFP_ATOMIC);
David Mosberger-Tang00c5aa12014-05-28 16:09:16 -06001550 if (!max3421_ep) {
1551 retval = -ENOMEM;
1552 goto out;
1553 }
David Mosberger2d531392014-04-28 22:14:07 -06001554 max3421_ep->ep = urb->ep;
1555 max3421_ep->last_active = max3421_hcd->frame_number;
1556 urb->ep->hcpriv = max3421_ep;
1557
1558 list_add_tail(&max3421_ep->ep_list, &max3421_hcd->ep_list);
1559 }
1560
1561 retval = usb_hcd_link_urb_to_ep(hcd, urb);
1562 if (retval == 0) {
1563 /* Since we added to the queue, restart scheduling: */
1564 max3421_hcd->sched_pass = SCHED_PASS_PERIODIC;
1565 wake_up_process(max3421_hcd->spi_thread);
1566 }
1567
David Mosberger-Tang00c5aa12014-05-28 16:09:16 -06001568out:
David Mosberger2d531392014-04-28 22:14:07 -06001569 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
1570 return retval;
1571}
1572
1573static int
1574max3421_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1575{
1576 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1577 unsigned long flags;
1578 int retval;
1579
1580 spin_lock_irqsave(&max3421_hcd->lock, flags);
1581
1582 /*
1583 * This will set urb->unlinked which in turn causes the entry
1584 * to be dropped at the next opportunity.
1585 */
1586 retval = usb_hcd_check_unlink_urb(hcd, urb, status);
1587 if (retval == 0) {
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -06001588 set_bit(CHECK_UNLINK, &max3421_hcd->todo);
David Mosberger2d531392014-04-28 22:14:07 -06001589 wake_up_process(max3421_hcd->spi_thread);
1590 }
1591 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
1592 return retval;
1593}
1594
1595static void
1596max3421_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
1597{
1598 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1599 unsigned long flags;
1600
1601 spin_lock_irqsave(&max3421_hcd->lock, flags);
1602
1603 if (ep->hcpriv) {
1604 struct max3421_ep *max3421_ep = ep->hcpriv;
1605
1606 /* remove myself from the ep_list: */
1607 if (!list_empty(&max3421_ep->ep_list))
1608 list_del(&max3421_ep->ep_list);
1609 kfree(max3421_ep);
1610 ep->hcpriv = NULL;
1611 }
1612
1613 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
1614}
1615
1616static int
1617max3421_get_frame_number(struct usb_hcd *hcd)
1618{
1619 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1620 return max3421_hcd->frame_number;
1621}
1622
1623/*
1624 * Should return a non-zero value when any port is undergoing a resume
1625 * transition while the root hub is suspended.
1626 */
1627static int
1628max3421_hub_status_data(struct usb_hcd *hcd, char *buf)
1629{
1630 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1631 unsigned long flags;
1632 int retval = 0;
1633
1634 spin_lock_irqsave(&max3421_hcd->lock, flags);
1635 if (!HCD_HW_ACCESSIBLE(hcd))
1636 goto done;
1637
1638 *buf = 0;
1639 if ((max3421_hcd->port_status & PORT_C_MASK) != 0) {
1640 *buf = (1 << 1); /* a hub over-current condition exists */
1641 dev_dbg(hcd->self.controller,
1642 "port status 0x%08x has changes\n",
1643 max3421_hcd->port_status);
1644 retval = 1;
1645 if (max3421_hcd->rh_state == MAX3421_RH_SUSPENDED)
1646 usb_hcd_resume_root_hub(hcd);
1647 }
1648done:
1649 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
1650 return retval;
1651}
1652
1653static inline void
1654hub_descriptor(struct usb_hub_descriptor *desc)
1655{
1656 memset(desc, 0, sizeof(*desc));
1657 /*
1658 * See Table 11-13: Hub Descriptor in USB 2.0 spec.
1659 */
Sergei Shtylyove3d02e02015-03-29 01:14:03 +03001660 desc->bDescriptorType = USB_DT_HUB; /* hub descriptor */
David Mosberger2d531392014-04-28 22:14:07 -06001661 desc->bDescLength = 9;
Sergei Shtylyov2e48c462015-01-19 01:38:22 +03001662 desc->wHubCharacteristics = cpu_to_le16(HUB_CHAR_INDV_PORT_LPSM |
1663 HUB_CHAR_COMMON_OCPM);
David Mosberger2d531392014-04-28 22:14:07 -06001664 desc->bNbrPorts = 1;
1665}
1666
1667/*
1668 * Set the MAX3421E general-purpose output with number PIN_NUMBER to
1669 * VALUE (0 or 1). PIN_NUMBER may be in the range from 1-8. For
1670 * any other value, this function acts as a no-op.
1671 */
1672static void
1673max3421_gpout_set_value(struct usb_hcd *hcd, u8 pin_number, u8 value)
1674{
1675 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1676 u8 mask, idx;
1677
1678 --pin_number;
Jules Maselbas721fdc82017-09-15 18:58:45 +02001679 if (pin_number >= MAX3421_GPOUT_COUNT)
David Mosberger2d531392014-04-28 22:14:07 -06001680 return;
1681
Jaewon Kim59b71f72016-07-21 22:20:53 +09001682 mask = 1u << (pin_number % 4);
David Mosberger2d531392014-04-28 22:14:07 -06001683 idx = pin_number / 4;
1684
1685 if (value)
1686 max3421_hcd->iopins[idx] |= mask;
1687 else
1688 max3421_hcd->iopins[idx] &= ~mask;
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -06001689 set_bit(IOPIN_UPDATE, &max3421_hcd->todo);
David Mosberger2d531392014-04-28 22:14:07 -06001690 wake_up_process(max3421_hcd->spi_thread);
1691}
1692
1693static int
1694max3421_hub_control(struct usb_hcd *hcd, u16 type_req, u16 value, u16 index,
1695 char *buf, u16 length)
1696{
1697 struct spi_device *spi = to_spi_device(hcd->self.controller);
1698 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1699 struct max3421_hcd_platform_data *pdata;
1700 unsigned long flags;
1701 int retval = 0;
1702
David Mosberger2d531392014-04-28 22:14:07 -06001703 pdata = spi->dev.platform_data;
Jules Maselbas892f6eb2017-10-31 11:40:33 +01001704
1705 spin_lock_irqsave(&max3421_hcd->lock, flags);
David Mosberger2d531392014-04-28 22:14:07 -06001706
1707 switch (type_req) {
1708 case ClearHubFeature:
1709 break;
1710 case ClearPortFeature:
1711 switch (value) {
1712 case USB_PORT_FEAT_SUSPEND:
1713 break;
1714 case USB_PORT_FEAT_POWER:
1715 dev_dbg(hcd->self.controller, "power-off\n");
David Mosberger-Tang4055e5e2014-05-29 10:23:55 -06001716 max3421_gpout_set_value(hcd, pdata->vbus_gpout,
1717 !pdata->vbus_active_level);
Gustavo A. R. Silva0d9b6d42020-07-07 14:56:07 -05001718 fallthrough;
David Mosberger2d531392014-04-28 22:14:07 -06001719 default:
1720 max3421_hcd->port_status &= ~(1 << value);
1721 }
1722 break;
1723 case GetHubDescriptor:
1724 hub_descriptor((struct usb_hub_descriptor *) buf);
1725 break;
1726
1727 case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
1728 case GetPortErrorCount:
1729 case SetHubDepth:
1730 /* USB3 only */
1731 goto error;
1732
1733 case GetHubStatus:
1734 *(__le32 *) buf = cpu_to_le32(0);
1735 break;
1736
	case GetPortStatus:
		if (index != 1) {
			retval = -EPIPE;
			goto error;
		}
		((__le16 *) buf)[0] = cpu_to_le16(max3421_hcd->port_status);
		((__le16 *) buf)[1] =
			cpu_to_le16(max3421_hcd->port_status >> 16);
		break;

	case SetHubFeature:
		retval = -EPIPE;
		break;

	case SetPortFeature:
		switch (value) {
		case USB_PORT_FEAT_LINK_STATE:
		case USB_PORT_FEAT_U1_TIMEOUT:
		case USB_PORT_FEAT_U2_TIMEOUT:
		case USB_PORT_FEAT_BH_PORT_RESET:
			goto error;
		case USB_PORT_FEAT_SUSPEND:
			if (max3421_hcd->active)
				max3421_hcd->port_status |=
					USB_PORT_STAT_SUSPEND;
			break;
		case USB_PORT_FEAT_POWER:
			dev_dbg(hcd->self.controller, "power-on\n");
			max3421_hcd->port_status |= USB_PORT_STAT_POWER;
			max3421_gpout_set_value(hcd, pdata->vbus_gpout,
						pdata->vbus_active_level);
			break;
		case USB_PORT_FEAT_RESET:
			max3421_reset_port(hcd);
			fallthrough;
		default:
			if ((max3421_hcd->port_status & USB_PORT_STAT_POWER)
			    != 0)
				max3421_hcd->port_status |= (1 << value);
		}
		break;

	default:
		dev_dbg(hcd->self.controller,
			"hub control req%04x v%04x i%04x l%d\n",
			type_req, value, index, length);
error:		/* "protocol stall" on error */
		retval = -EPIPE;
	}

	spin_unlock_irqrestore(&max3421_hcd->lock, flags);
	return retval;
}

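/*
 * Bus suspend/resume are not implemented; both handlers simply report
 * failure.
 */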
static int
max3421_bus_suspend(struct usb_hcd *hcd)
{
	return -1;
}

static int
max3421_bus_resume(struct usb_hcd *hcd)
{
	return -1;
}

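/*
 * HCD glue: the MAX3421E is a full-/low-speed-only controller
 * (HCD_USB11) and the root hub is polled rather than interrupt driven
 * (see HCD_FLAG_POLL_RH in max3421_probe()).
 */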
static const struct hc_driver max3421_hcd_desc = {
	.description =		"max3421",
	.product_desc =		DRIVER_DESC,
	.hcd_priv_size =	sizeof(struct max3421_hcd),
	.flags =		HCD_USB11,
	.reset =		max3421_reset,
	.start =		max3421_start,
	.stop =			max3421_stop,
	.get_frame_number =	max3421_get_frame_number,
	.urb_enqueue =		max3421_urb_enqueue,
	.urb_dequeue =		max3421_urb_dequeue,
	.endpoint_disable =	max3421_endpoint_disable,
	.hub_status_data =	max3421_hub_status_data,
	.hub_control =		max3421_hub_control,
	.bus_suspend =		max3421_bus_suspend,
	.bus_resume =		max3421_bus_resume,
};

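/*
 * Parse the device-tree property that selects which MAX3421E GPOUT pin
 * switches VBUS and what its active level is:
 *
 *	maxim,vbus-en-pin = <GPOUT ACTIVE_LEVEL>;
 *
 * For example, <3 1> would select GPOUT3 with an active-high enable
 * (these values are illustrative only).
 */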
static int
max3421_of_vbus_en_pin(struct device *dev, struct max3421_hcd_platform_data *pdata)
{
	int retval;
	uint32_t value[2];

	if (!pdata)
		return -EINVAL;

	retval = of_property_read_u32_array(dev->of_node, "maxim,vbus-en-pin", value, 2);
	if (retval) {
		dev_err(dev, "device tree node property 'maxim,vbus-en-pin' is missing\n");
		return retval;
	}
	dev_info(dev, "property 'maxim,vbus-en-pin' value is <%d %d>\n", value[0], value[1]);

	pdata->vbus_gpout = value[0];
	pdata->vbus_active_level = value[1];

	return 0;
}

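/*
 * Probe needs a working SPI bus and a valid interrupt line; the VBUS
 * switching configuration comes either from platform data or from the
 * "maxim,vbus-en-pin" device-tree property.
 */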
static int
max3421_probe(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	struct max3421_hcd *max3421_hcd;
	struct usb_hcd *hcd = NULL;
	struct max3421_hcd_platform_data *pdata = NULL;
	int retval;

	if (spi_setup(spi) < 0) {
		dev_err(&spi->dev, "Unable to setup SPI bus\n");
		return -EFAULT;
	}

	if (!spi->irq) {
		dev_err(dev, "Failed to get SPI IRQ\n");
		return -EFAULT;
	}

	if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
		pdata = devm_kzalloc(&spi->dev, sizeof(*pdata), GFP_KERNEL);
		if (!pdata) {
			retval = -ENOMEM;
			goto error;
		}
		retval = max3421_of_vbus_en_pin(dev, pdata);
		if (retval)
			goto error;

		spi->dev.platform_data = pdata;
	}

	pdata = spi->dev.platform_data;
	if (!pdata) {
		dev_err(&spi->dev, "driver configuration data is not provided\n");
		retval = -EFAULT;
		goto error;
	}
	if (pdata->vbus_active_level > 1) {
		dev_err(&spi->dev, "vbus active level value %d is out of range (0/1)\n", pdata->vbus_active_level);
		retval = -EINVAL;
		goto error;
	}
	if (pdata->vbus_gpout < 1 || pdata->vbus_gpout > MAX3421_GPOUT_COUNT) {
		dev_err(&spi->dev, "vbus gpout value %d is out of range (1..8)\n", pdata->vbus_gpout);
		retval = -EINVAL;
		goto error;
	}

	retval = -ENOMEM;
	hcd = usb_create_hcd(&max3421_hcd_desc, &spi->dev,
			     dev_name(&spi->dev));
	if (!hcd) {
		dev_err(&spi->dev, "failed to create HCD structure\n");
		goto error;
	}
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	max3421_hcd = hcd_to_max3421(hcd);
	max3421_hcd->next = max3421_hcd_list;
	max3421_hcd_list = max3421_hcd;
	INIT_LIST_HEAD(&max3421_hcd->ep_list);

	max3421_hcd->tx = kmalloc(sizeof(*max3421_hcd->tx), GFP_KERNEL);
	if (!max3421_hcd->tx)
		goto error;
	max3421_hcd->rx = kmalloc(sizeof(*max3421_hcd->rx), GFP_KERNEL);
	if (!max3421_hcd->rx)
		goto error;

	max3421_hcd->spi_thread = kthread_run(max3421_spi_thread, hcd,
					      "max3421_spi_thread");
	if (IS_ERR(max3421_hcd->spi_thread)) {
		retval = PTR_ERR(max3421_hcd->spi_thread);
		dev_err(&spi->dev, "failed to create SPI thread\n");
		goto error;
	}

	retval = usb_add_hcd(hcd, 0, 0);
	if (retval) {
		dev_err(&spi->dev, "failed to add HCD\n");
		goto error;
	}

	retval = request_irq(spi->irq, max3421_irq_handler,
			     IRQF_TRIGGER_LOW, "max3421", hcd);
	if (retval < 0) {
		dev_err(&spi->dev, "failed to request irq %d\n", spi->irq);
		goto error;
	}
	return 0;

error:
	if (IS_ENABLED(CONFIG_OF) && dev->of_node && pdata) {
		devm_kfree(&spi->dev, pdata);
		spi->dev.platform_data = NULL;
	}

	if (hcd) {
		kfree(max3421_hcd->tx);
		kfree(max3421_hcd->rx);
		if (!IS_ERR_OR_NULL(max3421_hcd->spi_thread))
			kthread_stop(max3421_hcd->spi_thread);
		usb_put_hcd(hcd);
	}
	return retval;
}

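/*
 * Locate the HCD that max3421_probe() created for this SPI device by
 * walking the global max3421_hcd_list, then tear it down.
 */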
static int
max3421_remove(struct spi_device *spi)
{
	struct max3421_hcd *max3421_hcd = NULL, **prev;
	struct usb_hcd *hcd = NULL;
	unsigned long flags;

	for (prev = &max3421_hcd_list; *prev; prev = &(*prev)->next) {
		max3421_hcd = *prev;
		hcd = max3421_to_hcd(max3421_hcd);
		if (hcd->self.controller == &spi->dev)
			break;
	}
	if (!max3421_hcd) {
		dev_err(&spi->dev, "no MAX3421 HCD found for SPI device %p\n",
			spi);
		return -ENODEV;
	}

	usb_remove_hcd(hcd);

	kthread_stop(max3421_hcd->spi_thread);

	spin_lock_irqsave(&max3421_hcd->lock, flags);
	*prev = max3421_hcd->next;
	spin_unlock_irqrestore(&max3421_hcd->lock, flags);

	free_irq(spi->irq, hcd);

	usb_put_hcd(hcd);
	return 0;
}

static const struct of_device_id max3421_of_match_table[] = {
	{ .compatible = "maxim,max3421", },
	{},
};
MODULE_DEVICE_TABLE(of, max3421_of_match_table);

static struct spi_driver max3421_driver = {
	.probe		= max3421_probe,
	.remove		= max3421_remove,
	.driver		= {
		.name	= "max3421-hcd",
		.of_match_table = of_match_ptr(max3421_of_match_table),
	},
};

module_spi_driver(max3421_driver);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("David Mosberger <davidm@egauge.net>");
MODULE_LICENSE("GPL");