// SPDX-License-Identifier: GPL-2.0
/*
 * USB Raw Gadget driver.
 * See Documentation/usb/raw-gadget.rst for more details.
 *
 * Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/compiler.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/semaphore.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/wait.h>

#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/ch11.h>
#include <linux/usb/gadget.h>

#include <uapi/linux/usb/raw_gadget.h>

#define DRIVER_DESC "USB Raw Gadget"
#define DRIVER_NAME "raw-gadget"

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Andrey Konovalov");
MODULE_LICENSE("GPL");

/*----------------------------------------------------------------------*/

#define RAW_EVENT_QUEUE_SIZE 16

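/*
 * A fixed-size FIFO of events (connect, control request, etc.) produced by
 * the gadget callbacks and consumed by userspace via
 * USB_RAW_IOCTL_EVENT_FETCH. Producers may run in atomic context, hence the
 * spinlock and GFP_ATOMIC allocations; the consumer sleeps on the semaphore
 * until an event is available.
 */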
struct raw_event_queue {
	/* See the comment in raw_event_queue_fetch() for locking details. */
	spinlock_t lock;
	struct semaphore sema;
	struct usb_raw_event *events[RAW_EVENT_QUEUE_SIZE];
	int size;
};

static void raw_event_queue_init(struct raw_event_queue *queue)
{
	spin_lock_init(&queue->lock);
	sema_init(&queue->sema, 0);
	queue->size = 0;
}

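/*
 * Appends an event to the queue. Runs from gadget callback context, so the
 * event is allocated with GFP_ATOMIC under the queue spinlock; the semaphore
 * is upped to wake a waiting fetcher.
 */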
static int raw_event_queue_add(struct raw_event_queue *queue,
	enum usb_raw_event_type type, size_t length, const void *data)
{
	unsigned long flags;
	struct usb_raw_event *event;

	spin_lock_irqsave(&queue->lock, flags);
	if (WARN_ON(queue->size >= RAW_EVENT_QUEUE_SIZE)) {
		spin_unlock_irqrestore(&queue->lock, flags);
		return -ENOMEM;
	}
	event = kmalloc(sizeof(*event) + length, GFP_ATOMIC);
	if (!event) {
		spin_unlock_irqrestore(&queue->lock, flags);
		return -ENOMEM;
	}
	event->type = type;
	event->length = length;
	if (event->length)
		memcpy(&event->data[0], data, length);
	queue->events[queue->size] = event;
	queue->size++;
	up(&queue->sema);
	spin_unlock_irqrestore(&queue->lock, flags);
	return 0;
}

static struct usb_raw_event *raw_event_queue_fetch(
		struct raw_event_queue *queue)
{
	int ret;
	unsigned long flags;
	struct usb_raw_event *event;

	/*
	 * This function can be called concurrently. We first check that
	 * there's at least one event queued by decrementing the semaphore,
	 * and then take the lock to protect queue struct fields.
	 */
	ret = down_interruptible(&queue->sema);
	if (ret)
		return ERR_PTR(ret);
	spin_lock_irqsave(&queue->lock, flags);
	/*
	 * queue->size must have the same value as queue->sema counter (before
	 * the down_interruptible() call above), so this check is a fail-safe.
	 */
	if (WARN_ON(!queue->size)) {
		spin_unlock_irqrestore(&queue->lock, flags);
		return ERR_PTR(-ENODEV);
	}
	event = queue->events[0];
	queue->size--;
	memmove(&queue->events[0], &queue->events[1],
			queue->size * sizeof(queue->events[0]));
	spin_unlock_irqrestore(&queue->lock, flags);
	return event;
}

static void raw_event_queue_destroy(struct raw_event_queue *queue)
{
	int i;

	for (i = 0; i < queue->size; i++)
		kfree(queue->events[i]);
	queue->size = 0;
}

/*----------------------------------------------------------------------*/

struct raw_dev;

#define USB_RAW_MAX_ENDPOINTS 32

enum ep_state {
	STATE_EP_DISABLED,
	STATE_EP_ENABLED,
};

struct raw_ep {
	struct raw_dev *dev;
	enum ep_state state;
	struct usb_ep *ep;
	struct usb_request *req;
	bool urb_queued;
	bool disabling;
	ssize_t status;
};

enum dev_state {
	STATE_DEV_INVALID = 0,
	STATE_DEV_OPENED,
	STATE_DEV_INITIALIZED,
	STATE_DEV_RUNNING,
	STATE_DEV_CLOSED,
	STATE_DEV_FAILED
};

struct raw_dev {
	struct kref count;
	spinlock_t lock;

	const char *udc_name;
	struct usb_gadget_driver driver;

	/* Reference to misc device: */
	struct device *dev;

	/* Protected by lock: */
	enum dev_state state;
	bool gadget_registered;
	struct usb_gadget *gadget;
	struct usb_request *req;
	bool ep0_in_pending;
	bool ep0_out_pending;
	bool ep0_urb_queued;
	ssize_t ep0_status;
	struct raw_ep eps[USB_RAW_MAX_ENDPOINTS];

	struct completion ep0_done;
	struct raw_event_queue queue;
};

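/* Allocates and initializes a raw_dev instance; one is created per open(). */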
static struct raw_dev *dev_new(void)
{
	struct raw_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	/* Matches kref_put() in raw_release(). */
	kref_init(&dev->count);
	spin_lock_init(&dev->lock);
	init_completion(&dev->ep0_done);
	raw_event_queue_init(&dev->queue);
	return dev;
}

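/*
 * kref release callback: frees the UDC names, the ep0 request, any events
 * still queued, and the resources of endpoints that are still enabled.
 */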
static void dev_free(struct kref *kref)
{
	struct raw_dev *dev = container_of(kref, struct raw_dev, count);
	int i;

	kfree(dev->udc_name);
	kfree(dev->driver.udc_name);
	if (dev->req) {
		if (dev->ep0_urb_queued)
			usb_ep_dequeue(dev->gadget->ep0, dev->req);
		usb_ep_free_request(dev->gadget->ep0, dev->req);
	}
	raw_event_queue_destroy(&dev->queue);
	for (i = 0; i < USB_RAW_MAX_ENDPOINTS; i++) {
		if (dev->eps[i].state != STATE_EP_ENABLED)
			continue;
		usb_ep_disable(dev->eps[i].ep);
		usb_ep_free_request(dev->eps[i].ep, dev->eps[i].req);
		kfree(dev->eps[i].ep->desc);
		dev->eps[i].state = STATE_EP_DISABLED;
	}
	kfree(dev);
}

/*----------------------------------------------------------------------*/

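/*
 * Queues an event for userspace. On failure the device is moved to
 * STATE_DEV_FAILED, since a dropped event would leave userspace out of sync
 * with the gadget.
 */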
static int raw_queue_event(struct raw_dev *dev,
	enum usb_raw_event_type type, size_t length, const void *data)
{
	int ret = 0;
	unsigned long flags;

	ret = raw_event_queue_add(&dev->queue, type, length, data);
	if (ret < 0) {
		spin_lock_irqsave(&dev->lock, flags);
		dev->state = STATE_DEV_FAILED;
		spin_unlock_irqrestore(&dev->lock, flags);
	}
	return ret;
}

static void gadget_ep0_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct raw_dev *dev = req->context;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (req->status)
		dev->ep0_status = req->status;
	else
		dev->ep0_status = req->actual;
	if (dev->ep0_in_pending)
		dev->ep0_in_pending = false;
	else
		dev->ep0_out_pending = false;
	spin_unlock_irqrestore(&dev->lock, flags);

	complete(&dev->ep0_done);
}

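/*
 * Called by the UDC core to bind the driver to a UDC. Rejects gadgets whose
 * name does not match the one requested via USB_RAW_IOCTL_INIT, allocates
 * the ep0 request, takes a reference on the raw_dev (dropped in
 * gadget_unbind()), and reports USB_RAW_EVENT_CONNECT to userspace.
 */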
static int gadget_bind(struct usb_gadget *gadget,
			struct usb_gadget_driver *driver)
{
	int ret = 0;
	struct raw_dev *dev = container_of(driver, struct raw_dev, driver);
	struct usb_request *req;
	unsigned long flags;

	if (strcmp(gadget->name, dev->udc_name) != 0)
		return -ENODEV;

	set_gadget_data(gadget, dev);
	req = usb_ep_alloc_request(gadget->ep0, GFP_KERNEL);
	if (!req) {
		dev_err(&gadget->dev, "usb_ep_alloc_request failed\n");
		set_gadget_data(gadget, NULL);
		return -ENOMEM;
	}

	spin_lock_irqsave(&dev->lock, flags);
	dev->req = req;
	dev->req->context = dev;
	dev->req->complete = gadget_ep0_complete;
	dev->gadget = gadget;
	spin_unlock_irqrestore(&dev->lock, flags);

	/* Matches kref_put() in gadget_unbind(). */
	kref_get(&dev->count);

	ret = raw_queue_event(dev, USB_RAW_EVENT_CONNECT, 0, NULL);
	if (ret < 0)
		dev_err(&gadget->dev, "failed to queue event\n");

	return ret;
}

static void gadget_unbind(struct usb_gadget *gadget)
{
	struct raw_dev *dev = get_gadget_data(gadget);

	set_gadget_data(gadget, NULL);
	/* Matches kref_get() in gadget_bind(). */
	kref_put(&dev->count, dev_free);
}

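/*
 * Forwards a control request to userspace as a USB_RAW_EVENT_CONTROL event.
 * Only one ep0 transfer may be pending at a time; a negative return value
 * lets the UDC stall the request.
 */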
static int gadget_setup(struct usb_gadget *gadget,
			const struct usb_ctrlrequest *ctrl)
{
	int ret = 0;
	struct raw_dev *dev = get_gadget_data(gadget);
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_err(&gadget->dev, "ignoring, device is not running\n");
		ret = -ENODEV;
		goto out_unlock;
	}
	if (dev->ep0_in_pending || dev->ep0_out_pending) {
		dev_dbg(&gadget->dev, "stalling, request already pending\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if ((ctrl->bRequestType & USB_DIR_IN) && ctrl->wLength)
		dev->ep0_in_pending = true;
	else
		dev->ep0_out_pending = true;
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = raw_queue_event(dev, USB_RAW_EVENT_CONTROL, sizeof(*ctrl), ctrl);
	if (ret < 0)
		dev_err(&gadget->dev, "failed to queue event\n");
	goto out;

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
out:
	return ret;
}

/* These are currently unused but present in case the UDC driver requires them. */
static void gadget_disconnect(struct usb_gadget *gadget) { }
static void gadget_suspend(struct usb_gadget *gadget) { }
static void gadget_resume(struct usb_gadget *gadget) { }
static void gadget_reset(struct usb_gadget *gadget) { }

/*----------------------------------------------------------------------*/

static struct miscdevice raw_misc_device;

static int raw_open(struct inode *inode, struct file *fd)
{
	struct raw_dev *dev;

	/* Nonblocking I/O is not supported yet. */
	if (fd->f_flags & O_NONBLOCK)
		return -EINVAL;

	dev = dev_new();
	if (!dev)
		return -ENOMEM;
	fd->private_data = dev;
	dev->state = STATE_DEV_OPENED;
	dev->dev = raw_misc_device.this_device;
	return 0;
}

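/*
 * Unregisters the gadget driver if it was registered via USB_RAW_IOCTL_RUN
 * and drops the file's reference; dev_free() runs once all references
 * (including the one held by gadget_bind()) are gone.
 */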
static int raw_release(struct inode *inode, struct file *fd)
{
	int ret = 0;
	struct raw_dev *dev = fd->private_data;
	unsigned long flags;
	bool unregister = false;

	spin_lock_irqsave(&dev->lock, flags);
	dev->state = STATE_DEV_CLOSED;
	if (!dev->gadget) {
		spin_unlock_irqrestore(&dev->lock, flags);
		goto out_put;
	}
	if (dev->gadget_registered)
		unregister = true;
	dev->gadget_registered = false;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (unregister) {
		ret = usb_gadget_unregister_driver(&dev->driver);
		if (ret != 0)
			dev_err(dev->dev,
				"usb_gadget_unregister_driver() failed with %d\n",
				ret);
		/* Matches kref_get() in raw_ioctl_run(). */
		kref_put(&dev->count, dev_free);
	}

out_put:
	/* Matches dev_new() in raw_open(). */
	kref_put(&dev->count, dev_free);
	return ret;
}

/*----------------------------------------------------------------------*/

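/*
 * USB_RAW_IOCTL_INIT: copies struct usb_raw_init from userspace, validates
 * the requested speed, and fills in the usb_gadget_driver that
 * USB_RAW_IOCTL_RUN will register. Moves the device to STATE_DEV_INITIALIZED.
 */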
static int raw_ioctl_init(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	struct usb_raw_init arg;
	char *udc_driver_name;
	char *udc_device_name;
	unsigned long flags;

	if (copy_from_user(&arg, (void __user *)value, sizeof(arg)))
		return -EFAULT;

	switch (arg.speed) {
	case USB_SPEED_UNKNOWN:
		arg.speed = USB_SPEED_HIGH;
		break;
	case USB_SPEED_LOW:
	case USB_SPEED_FULL:
	case USB_SPEED_HIGH:
	case USB_SPEED_SUPER:
		break;
	default:
		return -EINVAL;
	}

	udc_driver_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL);
	if (!udc_driver_name)
		return -ENOMEM;
	ret = strscpy(udc_driver_name, &arg.driver_name[0],
			UDC_NAME_LENGTH_MAX);
	if (ret < 0) {
		kfree(udc_driver_name);
		return ret;
	}
	ret = 0;

	udc_device_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL);
	if (!udc_device_name) {
		kfree(udc_driver_name);
		return -ENOMEM;
	}
	ret = strscpy(udc_device_name, &arg.device_name[0],
			UDC_NAME_LENGTH_MAX);
	if (ret < 0) {
		kfree(udc_driver_name);
		kfree(udc_device_name);
		return ret;
	}
	ret = 0;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_OPENED) {
		dev_dbg(dev->dev, "fail, device is not opened\n");
		kfree(udc_driver_name);
		kfree(udc_device_name);
		ret = -EINVAL;
		goto out_unlock;
	}
	dev->udc_name = udc_driver_name;

	dev->driver.function = DRIVER_DESC;
	dev->driver.max_speed = arg.speed;
	dev->driver.setup = gadget_setup;
	dev->driver.disconnect = gadget_disconnect;
	dev->driver.bind = gadget_bind;
	dev->driver.unbind = gadget_unbind;
	dev->driver.suspend = gadget_suspend;
	dev->driver.resume = gadget_resume;
	dev->driver.reset = gadget_reset;
	dev->driver.driver.name = DRIVER_NAME;
	dev->driver.udc_name = udc_device_name;
	dev->driver.match_existing_only = 1;

	dev->state = STATE_DEV_INITIALIZED;

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

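/*
 * USB_RAW_IOCTL_RUN: registers the gadget driver prepared by
 * USB_RAW_IOCTL_INIT. With match_existing_only set, this fails unless the
 * requested UDC is already present. On success the device enters
 * STATE_DEV_RUNNING.
 */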
static int raw_ioctl_run(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	unsigned long flags;

	if (value)
		return -EINVAL;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_INITIALIZED) {
		dev_dbg(dev->dev, "fail, device is not initialized\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = usb_gadget_probe_driver(&dev->driver);

	spin_lock_irqsave(&dev->lock, flags);
	if (ret) {
		dev_err(dev->dev,
			"fail, usb_gadget_probe_driver returned %d\n", ret);
		dev->state = STATE_DEV_FAILED;
		goto out_unlock;
	}
	dev->gadget_registered = true;
	dev->state = STATE_DEV_RUNNING;
	/* Matches kref_put() in raw_release(). */
	kref_get(&dev->count);

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

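/*
 * USB_RAW_IOCTL_EVENT_FETCH: blocks until an event is available, then copies
 * it to userspace, truncating the payload to the length userspace provided.
 */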
static int raw_ioctl_event_fetch(struct raw_dev *dev, unsigned long value)
{
	struct usb_raw_event arg;
	unsigned long flags;
	struct usb_raw_event *event;
	uint32_t length;

	if (copy_from_user(&arg, (void __user *)value, sizeof(arg)))
		return -EFAULT;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		spin_unlock_irqrestore(&dev->lock, flags);
		return -EINVAL;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		spin_unlock_irqrestore(&dev->lock, flags);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	event = raw_event_queue_fetch(&dev->queue);
	if (PTR_ERR(event) == -EINTR) {
		dev_dbg(&dev->gadget->dev, "event fetching interrupted\n");
		return -EINTR;
	}
	if (IS_ERR(event)) {
		dev_err(&dev->gadget->dev, "failed to fetch event\n");
		spin_lock_irqsave(&dev->lock, flags);
		dev->state = STATE_DEV_FAILED;
		spin_unlock_irqrestore(&dev->lock, flags);
		return -ENODEV;
	}
	length = min(arg.length, event->length);
	if (copy_to_user((void __user *)value, event, sizeof(*event) + length)) {
		/* The event was kmalloc'ed in raw_event_queue_add(). */
		kfree(event);
		return -EFAULT;
	}
	kfree(event);

	return 0;
}

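/*
 * Copies a struct usb_raw_ep_io header from userspace, validates it, and
 * returns a kernel buffer for the payload: copied from userspace for writes,
 * merely allocated for reads.
 */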
static void *raw_alloc_io_data(struct usb_raw_ep_io *io, void __user *ptr,
				bool get_from_user)
{
	void *data;

	if (copy_from_user(io, ptr, sizeof(*io)))
		return ERR_PTR(-EFAULT);
	if (io->ep >= USB_RAW_MAX_ENDPOINTS)
		return ERR_PTR(-EINVAL);
	if (!usb_raw_io_flags_valid(io->flags))
		return ERR_PTR(-EINVAL);
	if (io->length > PAGE_SIZE)
		return ERR_PTR(-EINVAL);
	if (get_from_user)
		data = memdup_user(ptr + sizeof(*io), io->length);
	else {
		data = kmalloc(io->length, GFP_KERNEL);
		if (!data)
			data = ERR_PTR(-ENOMEM);
	}
	return data;
}

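/*
 * Performs a single ep0 transfer: queues the request on the control endpoint
 * and waits (interruptibly) for its completion. Returns the number of bytes
 * transferred or a negative error.
 */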
static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
				void *data, bool in)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (dev->ep0_urb_queued) {
		dev_dbg(&dev->gadget->dev, "fail, urb already queued\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if ((in && !dev->ep0_in_pending) ||
			(!in && !dev->ep0_out_pending)) {
		dev_dbg(&dev->gadget->dev, "fail, wrong direction\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (WARN_ON(in && dev->ep0_out_pending)) {
		ret = -ENODEV;
		dev->state = STATE_DEV_FAILED;
		goto out_done;
	}
	if (WARN_ON(!in && dev->ep0_in_pending)) {
		ret = -ENODEV;
		dev->state = STATE_DEV_FAILED;
		goto out_done;
	}

	dev->req->buf = data;
	dev->req->length = io->length;
	dev->req->zero = usb_raw_io_flags_zero(io->flags);
	dev->ep0_urb_queued = true;
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = usb_ep_queue(dev->gadget->ep0, dev->req, GFP_KERNEL);
	if (ret) {
		dev_err(&dev->gadget->dev,
				"fail, usb_ep_queue returned %d\n", ret);
		spin_lock_irqsave(&dev->lock, flags);
		dev->state = STATE_DEV_FAILED;
		goto out_done;
	}

	ret = wait_for_completion_interruptible(&dev->ep0_done);
	if (ret) {
		dev_dbg(&dev->gadget->dev, "wait interrupted\n");
		usb_ep_dequeue(dev->gadget->ep0, dev->req);
		wait_for_completion(&dev->ep0_done);
		spin_lock_irqsave(&dev->lock, flags);
		goto out_done;
	}

	spin_lock_irqsave(&dev->lock, flags);
	ret = dev->ep0_status;

out_done:
	dev->ep0_urb_queued = false;
out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static int raw_ioctl_ep0_write(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	void *data;
	struct usb_raw_ep_io io;

	data = raw_alloc_io_data(&io, (void __user *)value, true);
	if (IS_ERR(data))
		return PTR_ERR(data);
	ret = raw_process_ep0_io(dev, &io, data, true);
	kfree(data);
	return ret;
}

static int raw_ioctl_ep0_read(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	void *data;
	struct usb_raw_ep_io io;
	unsigned int length;

	data = raw_alloc_io_data(&io, (void __user *)value, false);
	if (IS_ERR(data))
		return PTR_ERR(data);
	ret = raw_process_ep0_io(dev, &io, data, false);
	if (ret < 0)
		goto free;

	length = min(io.length, (unsigned int)ret);
	if (copy_to_user((void __user *)(value + sizeof(io)), data, length))
		ret = -EFAULT;
	else
		ret = length;
free:
	kfree(data);
	return ret;
}

static bool check_ep_caps(struct usb_ep *ep,
				struct usb_endpoint_descriptor *desc)
{
	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_ISOC:
		if (!ep->caps.type_iso)
			return false;
		break;
	case USB_ENDPOINT_XFER_BULK:
		if (!ep->caps.type_bulk)
			return false;
		break;
	case USB_ENDPOINT_XFER_INT:
		if (!ep->caps.type_int)
			return false;
		break;
	default:
		return false;
	}

	if (usb_endpoint_dir_in(desc) && !ep->caps.dir_in)
		return false;
	if (usb_endpoint_dir_out(desc) && !ep->caps.dir_out)
		return false;

	return true;
}

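/*
 * USB_RAW_IOCTL_EP_ENABLE: copies an endpoint descriptor from userspace,
 * finds a free slot in dev->eps and a gadget endpoint whose capabilities
 * match the descriptor, enables it, and returns the slot index used by the
 * other endpoint ioctls.
 */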
static int raw_ioctl_ep_enable(struct raw_dev *dev, unsigned long value)
{
	int ret = 0, i;
	unsigned long flags;
	struct usb_endpoint_descriptor *desc;
	struct usb_ep *ep = NULL;

	desc = memdup_user((void __user *)value, sizeof(*desc));
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	/*
	 * Endpoints with a maxpacket length of 0 can cause crashes in UDC
	 * drivers.
	 */
	if (usb_endpoint_maxp(desc) == 0) {
		dev_dbg(dev->dev, "fail, bad endpoint maxpacket\n");
		kfree(desc);
		return -EINVAL;
	}

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_free;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_free;
	}

	for (i = 0; i < USB_RAW_MAX_ENDPOINTS; i++) {
		if (dev->eps[i].state == STATE_EP_ENABLED)
			continue;
		break;
	}
	if (i == USB_RAW_MAX_ENDPOINTS) {
		dev_dbg(&dev->gadget->dev,
				"fail, no device endpoints available\n");
		ret = -EBUSY;
		goto out_free;
	}

	gadget_for_each_ep(ep, dev->gadget) {
		if (ep->enabled)
			continue;
		if (!check_ep_caps(ep, desc))
			continue;
		ep->desc = desc;
		ret = usb_ep_enable(ep);
		if (ret < 0) {
			dev_err(&dev->gadget->dev,
				"fail, usb_ep_enable returned %d\n", ret);
			goto out_free;
		}
		dev->eps[i].req = usb_ep_alloc_request(ep, GFP_ATOMIC);
		if (!dev->eps[i].req) {
			dev_err(&dev->gadget->dev,
				"fail, usb_ep_alloc_request failed\n");
			usb_ep_disable(ep);
			ret = -ENOMEM;
			goto out_free;
		}
		dev->eps[i].ep = ep;
		dev->eps[i].state = STATE_EP_ENABLED;
		ep->driver_data = &dev->eps[i];
		ret = i;
		goto out_unlock;
	}

	dev_dbg(&dev->gadget->dev, "fail, no gadget endpoints available\n");
	ret = -EBUSY;

out_free:
	kfree(desc);
out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

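/*
 * USB_RAW_IOCTL_EP_DISABLE: disables the endpoint in the given slot and
 * frees its request and descriptor. Fails while a transfer is still in
 * flight or a disable is already in progress.
 */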
static int raw_ioctl_ep_disable(struct raw_dev *dev, unsigned long value)
{
	int ret = 0, i = value;
	unsigned long flags;
	const void *desc;

	if (i < 0 || i >= USB_RAW_MAX_ENDPOINTS)
		return -EINVAL;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (dev->eps[i].state != STATE_EP_ENABLED) {
		dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (dev->eps[i].disabling) {
		dev_dbg(&dev->gadget->dev,
				"fail, disable already in progress\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (dev->eps[i].urb_queued) {
		dev_dbg(&dev->gadget->dev,
				"fail, waiting for urb completion\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	dev->eps[i].disabling = true;
	spin_unlock_irqrestore(&dev->lock, flags);

	usb_ep_disable(dev->eps[i].ep);

	spin_lock_irqsave(&dev->lock, flags);
	usb_ep_free_request(dev->eps[i].ep, dev->eps[i].req);
	desc = dev->eps[i].ep->desc;
	dev->eps[i].ep = NULL;
	dev->eps[i].state = STATE_EP_DISABLED;
	kfree(desc);
	dev->eps[i].disabling = false;

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static void gadget_ep_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct raw_ep *r_ep = (struct raw_ep *)ep->driver_data;
	struct raw_dev *dev = r_ep->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (req->status)
		r_ep->status = req->status;
	else
		r_ep->status = req->actual;
	spin_unlock_irqrestore(&dev->lock, flags);

	complete((struct completion *)req->context);
}

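/*
 * Performs a single transfer on a non-control endpoint previously enabled
 * with USB_RAW_IOCTL_EP_ENABLE, waiting on an on-stack completion. Returns
 * the number of bytes transferred or a negative error.
 */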
static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
				void *data, bool in)
{
	int ret = 0;
	unsigned long flags;
	struct raw_ep *ep = &dev->eps[io->ep];
	DECLARE_COMPLETION_ONSTACK(done);

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (ep->state != STATE_EP_ENABLED) {
		dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (ep->disabling) {
		dev_dbg(&dev->gadget->dev,
				"fail, endpoint is already being disabled\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (ep->urb_queued) {
		dev_dbg(&dev->gadget->dev, "fail, urb already queued\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if ((in && !ep->ep->caps.dir_in) || (!in && ep->ep->caps.dir_in)) {
		dev_dbg(&dev->gadget->dev, "fail, wrong direction\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	ep->dev = dev;
	ep->req->context = &done;
	ep->req->complete = gadget_ep_complete;
	ep->req->buf = data;
	ep->req->length = io->length;
	ep->req->zero = usb_raw_io_flags_zero(io->flags);
	ep->urb_queued = true;
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = usb_ep_queue(ep->ep, ep->req, GFP_KERNEL);
	if (ret) {
		dev_err(&dev->gadget->dev,
				"fail, usb_ep_queue returned %d\n", ret);
		spin_lock_irqsave(&dev->lock, flags);
		dev->state = STATE_DEV_FAILED;
		goto out_done;
	}

	ret = wait_for_completion_interruptible(&done);
	if (ret) {
		dev_dbg(&dev->gadget->dev, "wait interrupted\n");
		usb_ep_dequeue(ep->ep, ep->req);
		wait_for_completion(&done);
		spin_lock_irqsave(&dev->lock, flags);
		goto out_done;
	}

	spin_lock_irqsave(&dev->lock, flags);
	ret = ep->status;

out_done:
	ep->urb_queued = false;
out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static int raw_ioctl_ep_write(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	char *data;
	struct usb_raw_ep_io io;

	data = raw_alloc_io_data(&io, (void __user *)value, true);
	if (IS_ERR(data))
		return PTR_ERR(data);
	ret = raw_process_ep_io(dev, &io, data, true);
	kfree(data);
	return ret;
}

static int raw_ioctl_ep_read(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	char *data;
	struct usb_raw_ep_io io;
	unsigned int length;

	data = raw_alloc_io_data(&io, (void __user *)value, false);
	if (IS_ERR(data))
		return PTR_ERR(data);
	ret = raw_process_ep_io(dev, &io, data, false);
	if (ret < 0)
		goto free;

	length = min(io.length, (unsigned int)ret);
	if (copy_to_user((void __user *)(value + sizeof(io)), data, length))
		ret = -EFAULT;
	else
		ret = length;
free:
	kfree(data);
	return ret;
}

static int raw_ioctl_configure(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	unsigned long flags;

	if (value)
		return -EINVAL;
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	usb_gadget_set_state(dev->gadget, USB_STATE_CONFIGURED);

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static int raw_ioctl_vbus_draw(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	usb_gadget_vbus_draw(dev->gadget, 2 * value);

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static long raw_ioctl(struct file *fd, unsigned int cmd, unsigned long value)
{
	struct raw_dev *dev = fd->private_data;
	int ret = 0;

	if (!dev)
		return -EBUSY;

	switch (cmd) {
	case USB_RAW_IOCTL_INIT:
		ret = raw_ioctl_init(dev, value);
		break;
	case USB_RAW_IOCTL_RUN:
		ret = raw_ioctl_run(dev, value);
		break;
	case USB_RAW_IOCTL_EVENT_FETCH:
		ret = raw_ioctl_event_fetch(dev, value);
		break;
	case USB_RAW_IOCTL_EP0_WRITE:
		ret = raw_ioctl_ep0_write(dev, value);
		break;
	case USB_RAW_IOCTL_EP0_READ:
		ret = raw_ioctl_ep0_read(dev, value);
		break;
	case USB_RAW_IOCTL_EP_ENABLE:
		ret = raw_ioctl_ep_enable(dev, value);
		break;
	case USB_RAW_IOCTL_EP_DISABLE:
		ret = raw_ioctl_ep_disable(dev, value);
		break;
	case USB_RAW_IOCTL_EP_WRITE:
		ret = raw_ioctl_ep_write(dev, value);
		break;
	case USB_RAW_IOCTL_EP_READ:
		ret = raw_ioctl_ep_read(dev, value);
		break;
	case USB_RAW_IOCTL_CONFIGURE:
		ret = raw_ioctl_configure(dev, value);
		break;
	case USB_RAW_IOCTL_VBUS_DRAW:
		ret = raw_ioctl_vbus_draw(dev, value);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*----------------------------------------------------------------------*/

static const struct file_operations raw_fops = {
	.open = raw_open,
	.unlocked_ioctl = raw_ioctl,
	.compat_ioctl = raw_ioctl,
	.release = raw_release,
	.llseek = no_llseek,
};

static struct miscdevice raw_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = DRIVER_NAME,
	.fops = &raw_fops,
};

module_misc_device(raw_misc_device);