// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Diolan DLN-2 USB adapter
 *
 * Copyright (c) 2014 Intel Corporation
 *
 * Derived from:
 *  i2c-diolan-u2c.c
 *  Copyright (c) 2010-2011 Ericsson AB
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/dln2.h>
#include <linux/rculist.h>

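/*
 * Every request and response exchanged with the adapter starts with this
 * header. "handle" identifies the functional module (see enum dln2_handle),
 * "id" is the command and "echo" is used by this driver to match a response
 * with the request (and RX slot) that produced it.
 */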
struct dln2_header {
	__le16 size;
	__le16 id;
	__le16 echo;
	__le16 handle;
};

struct dln2_response {
	struct dln2_header hdr;
	__le16 result;
};

#define DLN2_GENERIC_MODULE_ID		0x00
#define DLN2_GENERIC_CMD(cmd)		DLN2_CMD(cmd, DLN2_GENERIC_MODULE_ID)
#define CMD_GET_DEVICE_VER		DLN2_GENERIC_CMD(0x30)
#define CMD_GET_DEVICE_SN		DLN2_GENERIC_CMD(0x31)

#define DLN2_HW_ID			0x200
#define DLN2_USB_TIMEOUT		200	/* in ms */
#define DLN2_MAX_RX_SLOTS		16
#define DLN2_MAX_URBS			16
#define DLN2_RX_BUF_SIZE		512

enum dln2_handle {
	DLN2_HANDLE_EVENT = 0,		/* don't change, hardware defined */
	DLN2_HANDLE_CTRL,
	DLN2_HANDLE_GPIO,
	DLN2_HANDLE_I2C,
	DLN2_HANDLE_SPI,
	DLN2_HANDLE_ADC,
	DLN2_HANDLES
};

/*
 * Receive context used between the receive demultiplexer and the transfer
 * routine. While sending a request the transfer routine will look for a free
 * receive context and use it to wait for a response and to receive the URB and
 * thus the response data.
 */
struct dln2_rx_context {
	/* completion used to wait for a response */
	struct completion done;

	/* if non-NULL the URB contains the response */
	struct urb *urb;

	/* if true then this context is used to wait for a response */
	bool in_use;
};

/*
 * Receive contexts for a particular DLN2 module (i2c, gpio, etc.). We use the
 * handle header field to identify the module in dln2_dev.mod_rx_slots and then
 * the echo header field to index the slots field and find the receive context
 * for a particular request.
 */
struct dln2_mod_rx_slots {
	/* RX slots bitmap */
	DECLARE_BITMAP(bmap, DLN2_MAX_RX_SLOTS);

	/* used to wait for a free RX slot */
	wait_queue_head_t wq;

	/* used to wait for an RX operation to complete */
	struct dln2_rx_context slots[DLN2_MAX_RX_SLOTS];

	/* avoid races between alloc/free_rx_slot and dln2_rx_transfer */
	spinlock_t lock;
};

enum dln2_endpoint {
	DLN2_EP_OUT	= 0,
	DLN2_EP_IN	= 1,
};

struct dln2_dev {
	struct usb_device *usb_dev;
	struct usb_interface *interface;
	u8 ep_in;
	u8 ep_out;

	struct urb *rx_urb[DLN2_MAX_URBS];
	void *rx_buf[DLN2_MAX_URBS];

	struct dln2_mod_rx_slots mod_rx_slots[DLN2_HANDLES];

	struct list_head event_cb_list;
	spinlock_t event_cb_lock;

	bool disconnect;
	int active_transfers;
	wait_queue_head_t disconnect_wq;
	spinlock_t disconnect_lock;
};

struct dln2_event_cb_entry {
	struct list_head list;
	u16 id;
	struct platform_device *pdev;
	dln2_event_cb_t callback;
};

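/*
 * Register a callback for unsolicited events (e.g. GPIO interrupts) sent by
 * the adapter for the given module id. The callback runs from the URB
 * completion handler with the event callback spinlock held, so it must not
 * sleep. Only one callback may be registered per id; -EBUSY is returned if
 * the id is already taken.
 */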
int dln2_register_event_cb(struct platform_device *pdev, u16 id,
			   dln2_event_cb_t event_cb)
{
	struct dln2_dev *dln2 = dev_get_drvdata(pdev->dev.parent);
	struct dln2_event_cb_entry *i, *entry;
	unsigned long flags;
	int ret = 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->id = id;
	entry->callback = event_cb;
	entry->pdev = pdev;

	spin_lock_irqsave(&dln2->event_cb_lock, flags);

	list_for_each_entry(i, &dln2->event_cb_list, list) {
		if (i->id == id) {
			ret = -EBUSY;
			break;
		}
	}

	if (!ret)
		list_add_rcu(&entry->list, &dln2->event_cb_list);

	spin_unlock_irqrestore(&dln2->event_cb_lock, flags);

	if (ret)
		kfree(entry);

	return ret;
}
EXPORT_SYMBOL(dln2_register_event_cb);

void dln2_unregister_event_cb(struct platform_device *pdev, u16 id)
{
	struct dln2_dev *dln2 = dev_get_drvdata(pdev->dev.parent);
	struct dln2_event_cb_entry *i;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&dln2->event_cb_lock, flags);

	list_for_each_entry(i, &dln2->event_cb_list, list) {
		if (i->id == id) {
			list_del_rcu(&i->list);
			found = true;
			break;
		}
	}

	spin_unlock_irqrestore(&dln2->event_cb_lock, flags);

	if (found) {
		synchronize_rcu();
		kfree(i);
	}
}
EXPORT_SYMBOL(dln2_unregister_event_cb);

/*
 * Returns true if a valid transfer slot is found. In this case the URB must
 * not be resubmitted immediately in dln2_rx as we need the data when
 * dln2_transfer is woken up. It will be resubmitted there.
 */
194static bool dln2_transfer_complete(struct dln2_dev *dln2, struct urb *urb,
195 u16 handle, u16 rx_slot)
196{
197 struct device *dev = &dln2->interface->dev;
198 struct dln2_mod_rx_slots *rxs = &dln2->mod_rx_slots[handle];
199 struct dln2_rx_context *rxc;
Sebastian Andrzej Siewior50d44f82018-07-02 09:31:32 +0200200 unsigned long flags;
Octavian Purdila338a1282014-11-06 15:48:03 +0200201 bool valid_slot = false;
202
Octavian Purdila00ee7a32014-11-18 14:57:59 +0200203 if (rx_slot >= DLN2_MAX_RX_SLOTS)
204 goto out;
205
Octavian Purdila338a1282014-11-06 15:48:03 +0200206 rxc = &rxs->slots[rx_slot];
207
Sebastian Andrzej Siewior50d44f82018-07-02 09:31:32 +0200208 spin_lock_irqsave(&rxs->lock, flags);
Octavian Purdila338a1282014-11-06 15:48:03 +0200209 if (rxc->in_use && !rxc->urb) {
210 rxc->urb = urb;
211 complete(&rxc->done);
212 valid_slot = true;
213 }
Sebastian Andrzej Siewior50d44f82018-07-02 09:31:32 +0200214 spin_unlock_irqrestore(&rxs->lock, flags);
Octavian Purdila338a1282014-11-06 15:48:03 +0200215
Octavian Purdila00ee7a32014-11-18 14:57:59 +0200216out:
Octavian Purdila338a1282014-11-06 15:48:03 +0200217 if (!valid_slot)
218 dev_warn(dev, "bad/late response %d/%d\n", handle, rx_slot);
219
220 return valid_slot;
221}
222
223static void dln2_run_event_callbacks(struct dln2_dev *dln2, u16 id, u16 echo,
224 void *data, int len)
225{
226 struct dln2_event_cb_entry *i;
227
228 rcu_read_lock();
229
230 list_for_each_entry_rcu(i, &dln2->event_cb_list, list) {
231 if (i->id == id) {
232 i->callback(i->pdev, echo, data, len);
233 break;
234 }
235 }
236
237 rcu_read_unlock();
238}
239
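/*
 * Completion handler for the bulk-in URBs. Validates the DLN2 header, then
 * either dispatches an event to the registered callback (DLN2_HANDLE_EVENT)
 * or hands the URB to the transfer slot waiting for this response. The URB is
 * resubmitted here unless a waiter took ownership of it, in which case
 * free_rx_slot() resubmits it later.
 */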
static void dln2_rx(struct urb *urb)
{
	struct dln2_dev *dln2 = urb->context;
	struct dln2_header *hdr = urb->transfer_buffer;
	struct device *dev = &dln2->interface->dev;
	u16 id, echo, handle, size;
	u8 *data;
	int len;
	int err;

	switch (urb->status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
	case -EPIPE:
		/* this urb is terminated, clean up */
		dev_dbg(dev, "urb shutting down with status %d\n", urb->status);
		return;
	default:
		dev_dbg(dev, "nonzero urb status received %d\n", urb->status);
		goto out;
	}

	if (urb->actual_length < sizeof(struct dln2_header)) {
		dev_err(dev, "short response: %d\n", urb->actual_length);
		goto out;
	}

	handle = le16_to_cpu(hdr->handle);
	id = le16_to_cpu(hdr->id);
	echo = le16_to_cpu(hdr->echo);
	size = le16_to_cpu(hdr->size);

	if (size != urb->actual_length) {
		dev_err(dev, "size mismatch: handle %x cmd %x echo %x size %d actual %d\n",
			handle, id, echo, size, urb->actual_length);
		goto out;
	}

	if (handle >= DLN2_HANDLES) {
		dev_warn(dev, "invalid handle %d\n", handle);
		goto out;
	}

	data = urb->transfer_buffer + sizeof(struct dln2_header);
	len = urb->actual_length - sizeof(struct dln2_header);

	if (handle == DLN2_HANDLE_EVENT) {
		unsigned long flags;

		spin_lock_irqsave(&dln2->event_cb_lock, flags);
		dln2_run_event_callbacks(dln2, id, echo, data, len);
		spin_unlock_irqrestore(&dln2->event_cb_lock, flags);
	} else {
		/* URB will be re-submitted in _dln2_transfer (free_rx_slot) */
		if (dln2_transfer_complete(dln2, urb, handle, echo))
			return;
	}

out:
	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err < 0)
		dev_err(dev, "failed to resubmit RX URB: %d\n", err);
}

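/*
 * Allocate a transfer buffer with a DLN2 header prepended to the payload in
 * obuf. On success *obuf_len is updated to the total (header + payload)
 * length that must be sent on the wire.
 */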
static void *dln2_prep_buf(u16 handle, u16 cmd, u16 echo, const void *obuf,
			   int *obuf_len, gfp_t gfp)
{
	int len;
	void *buf;
	struct dln2_header *hdr;

	len = *obuf_len + sizeof(*hdr);
	buf = kmalloc(len, gfp);
	if (!buf)
		return NULL;

	hdr = (struct dln2_header *)buf;
	hdr->id = cpu_to_le16(cmd);
	hdr->size = cpu_to_le16(len);
	hdr->echo = cpu_to_le16(echo);
	hdr->handle = cpu_to_le16(handle);

	memcpy(buf + sizeof(*hdr), obuf, *obuf_len);

	*obuf_len = len;

	return buf;
}

static int dln2_send_wait(struct dln2_dev *dln2, u16 handle, u16 cmd, u16 echo,
			  const void *obuf, int obuf_len)
{
	int ret = 0;
	int len = obuf_len;
	void *buf;
	int actual;

	buf = dln2_prep_buf(handle, cmd, echo, obuf, &len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_bulk_msg(dln2->usb_dev,
			   usb_sndbulkpipe(dln2->usb_dev, dln2->ep_out),
			   buf, len, &actual, DLN2_USB_TIMEOUT);

	kfree(buf);

	return ret;
}

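/*
 * Try to claim a free RX slot for the given module handle. The slot index is
 * later used as the "echo" value of the request so the response can be routed
 * back to it. Returns true when a slot was claimed or when the device is
 * disconnecting (in which case *slot is set to -ENODEV), so that waiters in
 * alloc_rx_slot() stop waiting.
 */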
static bool find_free_slot(struct dln2_dev *dln2, u16 handle, int *slot)
{
	struct dln2_mod_rx_slots *rxs;
	unsigned long flags;

	if (dln2->disconnect) {
		*slot = -ENODEV;
		return true;
	}

	rxs = &dln2->mod_rx_slots[handle];

	spin_lock_irqsave(&rxs->lock, flags);

	*slot = find_first_zero_bit(rxs->bmap, DLN2_MAX_RX_SLOTS);

	if (*slot < DLN2_MAX_RX_SLOTS) {
		struct dln2_rx_context *rxc = &rxs->slots[*slot];

		set_bit(*slot, rxs->bmap);
		rxc->in_use = true;
	}

	spin_unlock_irqrestore(&rxs->lock, flags);

	return *slot < DLN2_MAX_RX_SLOTS;
}

static int alloc_rx_slot(struct dln2_dev *dln2, u16 handle)
{
	int ret;
	int slot;

	/*
	 * No need to timeout here, the wait is bounded by the timeout in
	 * _dln2_transfer.
	 */
	ret = wait_event_interruptible(dln2->mod_rx_slots[handle].wq,
				       find_free_slot(dln2, handle, &slot));
	if (ret < 0)
		return ret;

	return slot;
}

static void free_rx_slot(struct dln2_dev *dln2, u16 handle, int slot)
{
	struct dln2_mod_rx_slots *rxs;
	struct urb *urb = NULL;
	unsigned long flags;
	struct dln2_rx_context *rxc;

	rxs = &dln2->mod_rx_slots[handle];

	spin_lock_irqsave(&rxs->lock, flags);

	clear_bit(slot, rxs->bmap);

	rxc = &rxs->slots[slot];
	rxc->in_use = false;
	urb = rxc->urb;
	rxc->urb = NULL;
	reinit_completion(&rxc->done);

	spin_unlock_irqrestore(&rxs->lock, flags);

	if (urb) {
		int err;
		struct device *dev = &dln2->interface->dev;

		err = usb_submit_urb(urb, GFP_KERNEL);
		if (err < 0)
			dev_err(dev, "failed to resubmit RX URB: %d\n", err);
	}

	wake_up_interruptible(&rxs->wq);
}

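/*
 * Send a request to the given module and wait for its response. The RX slot
 * index doubles as the request's echo value; dln2_rx() completes the slot
 * when the matching response arrives. On success the response payload
 * (without header and result word) is copied to ibuf and *ibuf_len is updated
 * to the number of bytes actually copied.
 */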
static int _dln2_transfer(struct dln2_dev *dln2, u16 handle, u16 cmd,
			  const void *obuf, unsigned obuf_len,
			  void *ibuf, unsigned *ibuf_len)
{
	int ret = 0;
	int rx_slot;
	struct dln2_response *rsp;
	struct dln2_rx_context *rxc;
	struct device *dev = &dln2->interface->dev;
	const unsigned long timeout = msecs_to_jiffies(DLN2_USB_TIMEOUT);
	struct dln2_mod_rx_slots *rxs = &dln2->mod_rx_slots[handle];
	int size;

	spin_lock(&dln2->disconnect_lock);
	if (!dln2->disconnect)
		dln2->active_transfers++;
	else
		ret = -ENODEV;
	spin_unlock(&dln2->disconnect_lock);

	if (ret)
		return ret;

	rx_slot = alloc_rx_slot(dln2, handle);
	if (rx_slot < 0) {
		ret = rx_slot;
		goto out_decr;
	}

	ret = dln2_send_wait(dln2, handle, cmd, rx_slot, obuf, obuf_len);
	if (ret < 0) {
		dev_err(dev, "USB write failed: %d\n", ret);
		goto out_free_rx_slot;
	}

	rxc = &rxs->slots[rx_slot];

	ret = wait_for_completion_interruptible_timeout(&rxc->done, timeout);
	if (ret <= 0) {
		if (!ret)
			ret = -ETIMEDOUT;
		goto out_free_rx_slot;
	} else {
		ret = 0;
	}

	if (dln2->disconnect) {
		ret = -ENODEV;
		goto out_free_rx_slot;
	}

	/* if we got here we know that the response header has been checked */
	rsp = rxc->urb->transfer_buffer;
	size = le16_to_cpu(rsp->hdr.size);

	if (size < sizeof(*rsp)) {
		ret = -EPROTO;
		goto out_free_rx_slot;
	}

	if (le16_to_cpu(rsp->result) > 0x80) {
		dev_dbg(dev, "%d received response with error %d\n",
			handle, le16_to_cpu(rsp->result));
		ret = -EREMOTEIO;
		goto out_free_rx_slot;
	}

	if (!ibuf)
		goto out_free_rx_slot;

	if (*ibuf_len > size - sizeof(*rsp))
		*ibuf_len = size - sizeof(*rsp);

	memcpy(ibuf, rsp + 1, *ibuf_len);

out_free_rx_slot:
	free_rx_slot(dln2, handle, rx_slot);
out_decr:
	spin_lock(&dln2->disconnect_lock);
	dln2->active_transfers--;
	spin_unlock(&dln2->disconnect_lock);
	if (dln2->disconnect)
		wake_up(&dln2->disconnect_wq);

	return ret;
}

int dln2_transfer(struct platform_device *pdev, u16 cmd,
		  const void *obuf, unsigned obuf_len,
		  void *ibuf, unsigned *ibuf_len)
{
	struct dln2_platform_data *dln2_pdata;
	struct dln2_dev *dln2;
	u16 handle;

	dln2 = dev_get_drvdata(pdev->dev.parent);
	dln2_pdata = dev_get_platdata(&pdev->dev);
	handle = dln2_pdata->handle;

	return _dln2_transfer(dln2, handle, cmd, obuf, obuf_len, ibuf,
			      ibuf_len);
}
EXPORT_SYMBOL(dln2_transfer);

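/*
 * Ask the adapter for its hardware type and bail out if it does not report
 * the DLN2 hardware ID this driver supports.
 */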
static int dln2_check_hw(struct dln2_dev *dln2)
{
	int ret;
	__le32 hw_type;
	int len = sizeof(hw_type);

	ret = _dln2_transfer(dln2, DLN2_HANDLE_CTRL, CMD_GET_DEVICE_VER,
			     NULL, 0, &hw_type, &len);
	if (ret < 0)
		return ret;
	if (len < sizeof(hw_type))
		return -EREMOTEIO;

	if (le32_to_cpu(hw_type) != DLN2_HW_ID) {
		dev_err(&dln2->interface->dev, "Device ID 0x%x not supported\n",
			le32_to_cpu(hw_type));
		return -ENODEV;
	}

	return 0;
}

static int dln2_print_serialno(struct dln2_dev *dln2)
{
	int ret;
	__le32 serial_no;
	int len = sizeof(serial_no);
	struct device *dev = &dln2->interface->dev;

	ret = _dln2_transfer(dln2, DLN2_HANDLE_CTRL, CMD_GET_DEVICE_SN, NULL, 0,
			     &serial_no, &len);
	if (ret < 0)
		return ret;
	if (len < sizeof(serial_no))
		return -EREMOTEIO;

	dev_info(dev, "Diolan DLN2 serial %u\n", le32_to_cpu(serial_no));

	return 0;
}

static int dln2_hw_init(struct dln2_dev *dln2)
{
	int ret;

	ret = dln2_check_hw(dln2);
	if (ret < 0)
		return ret;

	return dln2_print_serialno(dln2);
}

static void dln2_free_rx_urbs(struct dln2_dev *dln2)
{
	int i;

	for (i = 0; i < DLN2_MAX_URBS; i++) {
		usb_free_urb(dln2->rx_urb[i]);
		kfree(dln2->rx_buf[i]);
	}
}

static void dln2_stop_rx_urbs(struct dln2_dev *dln2)
{
	int i;

	for (i = 0; i < DLN2_MAX_URBS; i++)
		usb_kill_urb(dln2->rx_urb[i]);
}

static void dln2_free(struct dln2_dev *dln2)
{
	dln2_free_rx_urbs(dln2);
	usb_put_dev(dln2->usb_dev);
	kfree(dln2);
}

static int dln2_setup_rx_urbs(struct dln2_dev *dln2,
			      struct usb_host_interface *hostif)
{
	int i;
	const int rx_max_size = DLN2_RX_BUF_SIZE;

	for (i = 0; i < DLN2_MAX_URBS; i++) {
		dln2->rx_buf[i] = kmalloc(rx_max_size, GFP_KERNEL);
		if (!dln2->rx_buf[i])
			return -ENOMEM;

		dln2->rx_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
		if (!dln2->rx_urb[i])
			return -ENOMEM;

		usb_fill_bulk_urb(dln2->rx_urb[i], dln2->usb_dev,
				  usb_rcvbulkpipe(dln2->usb_dev, dln2->ep_in),
				  dln2->rx_buf[i], rx_max_size, dln2_rx, dln2);
	}

	return 0;
}

static int dln2_start_rx_urbs(struct dln2_dev *dln2, gfp_t gfp)
{
	struct device *dev = &dln2->interface->dev;
	int ret;
	int i;

	for (i = 0; i < DLN2_MAX_URBS; i++) {
		ret = usb_submit_urb(dln2->rx_urb[i], gfp);
		if (ret < 0) {
			dev_err(dev, "failed to submit RX URB: %d\n", ret);
			return ret;
		}
	}

	return 0;
}

enum {
	DLN2_ACPI_MATCH_GPIO	= 0,
	DLN2_ACPI_MATCH_I2C	= 1,
	DLN2_ACPI_MATCH_SPI	= 2,
	DLN2_ACPI_MATCH_ADC	= 3,
};

static struct dln2_platform_data dln2_pdata_gpio = {
	.handle = DLN2_HANDLE_GPIO,
};

static struct mfd_cell_acpi_match dln2_acpi_match_gpio = {
	.adr = DLN2_ACPI_MATCH_GPIO,
};

/* Only one I2C port seems to be supported on current hardware */
static struct dln2_platform_data dln2_pdata_i2c = {
	.handle = DLN2_HANDLE_I2C,
	.port = 0,
};

static struct mfd_cell_acpi_match dln2_acpi_match_i2c = {
	.adr = DLN2_ACPI_MATCH_I2C,
};

/* Only one SPI port supported */
static struct dln2_platform_data dln2_pdata_spi = {
	.handle = DLN2_HANDLE_SPI,
	.port = 0,
};

static struct mfd_cell_acpi_match dln2_acpi_match_spi = {
	.adr = DLN2_ACPI_MATCH_SPI,
};

/* Only one ADC port supported */
static struct dln2_platform_data dln2_pdata_adc = {
	.handle = DLN2_HANDLE_ADC,
	.port = 0,
};

static struct mfd_cell_acpi_match dln2_acpi_match_adc = {
	.adr = DLN2_ACPI_MATCH_ADC,
};

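/*
 * MFD cells instantiated for each function of the adapter. Every cell carries
 * a dln2_platform_data with the module handle (and port) it should talk to,
 * and an ACPI _ADR match used to associate ACPI-described children with the
 * right cell.
 */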
static const struct mfd_cell dln2_devs[] = {
	{
		.name = "dln2-gpio",
		.acpi_match = &dln2_acpi_match_gpio,
		.platform_data = &dln2_pdata_gpio,
		.pdata_size = sizeof(struct dln2_platform_data),
	},
	{
		.name = "dln2-i2c",
		.acpi_match = &dln2_acpi_match_i2c,
		.platform_data = &dln2_pdata_i2c,
		.pdata_size = sizeof(struct dln2_platform_data),
	},
	{
		.name = "dln2-spi",
		.acpi_match = &dln2_acpi_match_spi,
		.platform_data = &dln2_pdata_spi,
		.pdata_size = sizeof(struct dln2_platform_data),
	},
	{
		.name = "dln2-adc",
		.acpi_match = &dln2_acpi_match_adc,
		.platform_data = &dln2_pdata_adc,
		.pdata_size = sizeof(struct dln2_platform_data),
	},
};

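/*
 * Quiesce the device: refuse new transfers, complete all outstanding response
 * waiters, wait for in-flight transfers to finish and then kill the RX URBs.
 * Used by both disconnect and suspend.
 */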
static void dln2_stop(struct dln2_dev *dln2)
{
	int i, j;

	/* don't allow starting new transfers */
	spin_lock(&dln2->disconnect_lock);
	dln2->disconnect = true;
	spin_unlock(&dln2->disconnect_lock);

	/* cancel in progress transfers */
	for (i = 0; i < DLN2_HANDLES; i++) {
		struct dln2_mod_rx_slots *rxs = &dln2->mod_rx_slots[i];
		unsigned long flags;

		spin_lock_irqsave(&rxs->lock, flags);

		/* cancel all response waiters */
		for (j = 0; j < DLN2_MAX_RX_SLOTS; j++) {
			struct dln2_rx_context *rxc = &rxs->slots[j];

			if (rxc->in_use)
				complete(&rxc->done);
		}

		spin_unlock_irqrestore(&rxs->lock, flags);
	}

	/* wait for transfers to end */
	wait_event(dln2->disconnect_wq, !dln2->active_transfers);

	dln2_stop_rx_urbs(dln2);
}

static void dln2_disconnect(struct usb_interface *interface)
{
	struct dln2_dev *dln2 = usb_get_intfdata(interface);

	dln2_stop(dln2);

	mfd_remove_devices(&interface->dev);

	dln2_free(dln2);
}

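/*
 * Probe: validate the two bulk endpoints, allocate the device state, set up
 * and start the RX URBs, verify the hardware and finally register the MFD
 * sub-devices.
 */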
static int dln2_probe(struct usb_interface *interface,
		      const struct usb_device_id *usb_id)
{
	struct usb_host_interface *hostif = interface->cur_altsetting;
	struct usb_endpoint_descriptor *epin;
	struct usb_endpoint_descriptor *epout;
	struct device *dev = &interface->dev;
	struct dln2_dev *dln2;
	int ret;
	int i, j;

	if (hostif->desc.bInterfaceNumber != 0 ||
	    hostif->desc.bNumEndpoints < 2)
		return -ENODEV;

	epout = &hostif->endpoint[DLN2_EP_OUT].desc;
	if (!usb_endpoint_is_bulk_out(epout))
		return -ENODEV;
	epin = &hostif->endpoint[DLN2_EP_IN].desc;
	if (!usb_endpoint_is_bulk_in(epin))
		return -ENODEV;

	dln2 = kzalloc(sizeof(*dln2), GFP_KERNEL);
	if (!dln2)
		return -ENOMEM;

	dln2->ep_out = epout->bEndpointAddress;
	dln2->ep_in = epin->bEndpointAddress;
	dln2->usb_dev = usb_get_dev(interface_to_usbdev(interface));
	dln2->interface = interface;
	usb_set_intfdata(interface, dln2);
	init_waitqueue_head(&dln2->disconnect_wq);

	for (i = 0; i < DLN2_HANDLES; i++) {
		init_waitqueue_head(&dln2->mod_rx_slots[i].wq);
		spin_lock_init(&dln2->mod_rx_slots[i].lock);
		for (j = 0; j < DLN2_MAX_RX_SLOTS; j++)
			init_completion(&dln2->mod_rx_slots[i].slots[j].done);
	}

	spin_lock_init(&dln2->event_cb_lock);
	spin_lock_init(&dln2->disconnect_lock);
	INIT_LIST_HEAD(&dln2->event_cb_list);

	ret = dln2_setup_rx_urbs(dln2, hostif);
	if (ret)
		goto out_free;

	ret = dln2_start_rx_urbs(dln2, GFP_KERNEL);
	if (ret)
		goto out_stop_rx;

	ret = dln2_hw_init(dln2);
	if (ret < 0) {
		dev_err(dev, "failed to initialize hardware\n");
		goto out_stop_rx;
	}

	ret = mfd_add_hotplug_devices(dev, dln2_devs, ARRAY_SIZE(dln2_devs));
	if (ret != 0) {
		dev_err(dev, "failed to add mfd devices to core\n");
		goto out_stop_rx;
	}

	return 0;

out_stop_rx:
	dln2_stop_rx_urbs(dln2);

out_free:
	dln2_free(dln2);

	return ret;
}

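/*
 * Suspend quiesces the device exactly like disconnect does; resume clears the
 * disconnect flag and restarts the RX URBs (with GFP_NOIO, since I/O must be
 * avoided on the resume path).
 */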
static int dln2_suspend(struct usb_interface *iface, pm_message_t message)
{
	struct dln2_dev *dln2 = usb_get_intfdata(iface);

	dln2_stop(dln2);

	return 0;
}

static int dln2_resume(struct usb_interface *iface)
{
	struct dln2_dev *dln2 = usb_get_intfdata(iface);

	dln2->disconnect = false;

	return dln2_start_rx_urbs(dln2, GFP_NOIO);
}

static const struct usb_device_id dln2_table[] = {
	{ USB_DEVICE(0xa257, 0x2013) },
	{ }
};

MODULE_DEVICE_TABLE(usb, dln2_table);

static struct usb_driver dln2_driver = {
	.name = "dln2",
	.probe = dln2_probe,
	.disconnect = dln2_disconnect,
	.id_table = dln2_table,
	.suspend = dln2_suspend,
	.resume = dln2_resume,
};

module_usb_driver(dln2_driver);

MODULE_AUTHOR("Octavian Purdila <octavian.purdila@intel.com>");
MODULE_DESCRIPTION("Core driver for the Diolan DLN2 interface adapter");
MODULE_LICENSE("GPL v2");