/*
 * User-space I/O driver support for HID subsystem
 * Copyright (c) 2012 David Herrmann
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/atomic.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/hid.h>
#include <linux/input.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/uhid.h>
#include <linux/wait.h>

#define UHID_NAME	"uhid"
#define UHID_BUFSIZE	32

struct uhid_device {
	struct mutex devlock;
	bool running;

	__u8 *rd_data;
	uint rd_size;

	struct hid_device *hid;
	struct uhid_event input_buf;

	/* event output queue (kernel to user-space); protected by qlock */
	wait_queue_head_t waitq;
	spinlock_t qlock;
	__u8 head;
	__u8 tail;
	struct uhid_event *outq[UHID_BUFSIZE];

	/* blocking GET_REPORT support; state changes protected by qlock */
	struct mutex report_lock;
	wait_queue_head_t report_wait;
	bool report_running;
	u32 report_id;
	u32 report_type;
	struct uhid_event report_buf;
	struct work_struct worker;
};

static struct miscdevice uhid_misc;

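/*
 * hid_add_device() is deferred to this worker; see the comment in
 * uhid_dev_create2() for why it cannot be called directly from the
 * write() path.
 */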
static void uhid_device_add_worker(struct work_struct *work)
{
	struct uhid_device *uhid = container_of(work, struct uhid_device, worker);
	int ret;

	ret = hid_add_device(uhid->hid);
	if (ret) {
		hid_err(uhid->hid, "Cannot register HID device: error %d\n", ret);

		hid_destroy_device(uhid->hid);
		uhid->hid = NULL;
		uhid->running = false;
	}
}

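/*
 * Caller must hold uhid->qlock. Ownership of @ev passes to the queue; if the
 * queue is full, @ev is freed and the event is dropped.
 */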
static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev)
{
	__u8 newhead;

	newhead = (uhid->head + 1) % UHID_BUFSIZE;

	if (newhead != uhid->tail) {
		uhid->outq[uhid->head] = ev;
		uhid->head = newhead;
		wake_up_interruptible(&uhid->waitq);
	} else {
		hid_warn(uhid->hid, "Output queue is full\n");
		kfree(ev);
	}
}

static int uhid_queue_event(struct uhid_device *uhid, __u32 event)
{
	unsigned long flags;
	struct uhid_event *ev;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = event;

	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return 0;
}

static int uhid_hid_start(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;
	struct uhid_event *ev;
	unsigned long flags;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_START;

	if (hid->report_enum[HID_FEATURE_REPORT].numbered)
		ev->u.start.dev_flags |= UHID_DEV_NUMBERED_FEATURE_REPORTS;
	if (hid->report_enum[HID_OUTPUT_REPORT].numbered)
		ev->u.start.dev_flags |= UHID_DEV_NUMBERED_OUTPUT_REPORTS;
	if (hid->report_enum[HID_INPUT_REPORT].numbered)
		ev->u.start.dev_flags |= UHID_DEV_NUMBERED_INPUT_REPORTS;

	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return 0;
}

static void uhid_hid_stop(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	hid->claimed = 0;
	uhid_queue_event(uhid, UHID_STOP);
}

static int uhid_hid_open(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	return uhid_queue_event(uhid, UHID_OPEN);
}

static void uhid_hid_close(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	uhid_queue_event(uhid, UHID_CLOSE);
}

static int uhid_hid_parse(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	return hid_parse_report(hid, uhid->rd_data, uhid->rd_size);
}

/* must be called with report_lock held */
static int __uhid_report_queue_and_wait(struct uhid_device *uhid,
					struct uhid_event *ev,
					__u32 *report_id)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&uhid->qlock, flags);
	*report_id = ++uhid->report_id;
	/* reply event type is request type + 1 (UHID_GET_REPORT -> UHID_GET_REPORT_REPLY) */
	uhid->report_type = ev->type + 1;
	uhid->report_running = true;
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	ret = wait_event_interruptible_timeout(uhid->report_wait,
				!uhid->report_running || !uhid->running,
				5 * HZ);
	if (!ret || !uhid->running || uhid->report_running)
		ret = -EIO;
	else if (ret < 0)
		ret = -ERESTARTSYS;
	else
		ret = 0;

	uhid->report_running = false;

	return ret;
}

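/*
 * Complete a pending GET_REPORT/SET_REPORT transaction: if @id and @ev->type
 * match the request currently waited for, copy the reply into report_buf and
 * wake the waiter. Stale or mismatched replies are dropped silently.
 */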
static void uhid_report_wake_up(struct uhid_device *uhid, u32 id,
				const struct uhid_event *ev)
{
	unsigned long flags;

	spin_lock_irqsave(&uhid->qlock, flags);

	/* id for old report; drop it silently */
	if (uhid->report_type != ev->type || uhid->report_id != id)
		goto unlock;
	if (!uhid->report_running)
		goto unlock;

	memcpy(&uhid->report_buf, ev, sizeof(*ev));
	uhid->report_running = false;
	wake_up_interruptible(&uhid->report_wait);

unlock:
	spin_unlock_irqrestore(&uhid->qlock, flags);
}

static int uhid_hid_get_report(struct hid_device *hid, unsigned char rnum,
			       u8 *buf, size_t count, u8 rtype)
{
	struct uhid_device *uhid = hid->driver_data;
	struct uhid_get_report_reply_req *req;
	struct uhid_event *ev;
	int ret;

	if (!uhid->running)
		return -EIO;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_GET_REPORT;
	ev->u.get_report.rnum = rnum;
	ev->u.get_report.rtype = rtype;

	ret = mutex_lock_interruptible(&uhid->report_lock);
	if (ret) {
		kfree(ev);
		return ret;
	}

	/* this _always_ takes ownership of @ev */
	ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.get_report.id);
	if (ret)
		goto unlock;

	req = &uhid->report_buf.u.get_report_reply;
	if (req->err) {
		ret = -EIO;
	} else {
		ret = min3(count, (size_t)req->size, (size_t)UHID_DATA_MAX);
		memcpy(buf, req->data, ret);
	}

unlock:
	mutex_unlock(&uhid->report_lock);
	return ret;
}

static int uhid_hid_set_report(struct hid_device *hid, unsigned char rnum,
			       const u8 *buf, size_t count, u8 rtype)
{
	struct uhid_device *uhid = hid->driver_data;
	struct uhid_event *ev;
	int ret;

	if (!uhid->running || count > UHID_DATA_MAX)
		return -EIO;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_SET_REPORT;
	ev->u.set_report.rnum = rnum;
	ev->u.set_report.rtype = rtype;
	ev->u.set_report.size = count;
	memcpy(ev->u.set_report.data, buf, count);

	ret = mutex_lock_interruptible(&uhid->report_lock);
	if (ret) {
		kfree(ev);
		return ret;
	}

	/* this _always_ takes ownership of @ev */
	ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.set_report.id);
	if (ret)
		goto unlock;

	if (uhid->report_buf.u.set_report_reply.err)
		ret = -EIO;
	else
		ret = count;

unlock:
	mutex_unlock(&uhid->report_lock);
	return ret;
}

static int uhid_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
				__u8 *buf, size_t len, unsigned char rtype,
				int reqtype)
{
	u8 u_rtype;

	switch (rtype) {
	case HID_FEATURE_REPORT:
		u_rtype = UHID_FEATURE_REPORT;
		break;
	case HID_OUTPUT_REPORT:
		u_rtype = UHID_OUTPUT_REPORT;
		break;
	case HID_INPUT_REPORT:
		u_rtype = UHID_INPUT_REPORT;
		break;
	default:
		return -EINVAL;
	}

	switch (reqtype) {
	case HID_REQ_GET_REPORT:
		return uhid_hid_get_report(hid, reportnum, buf, len, u_rtype);
	case HID_REQ_SET_REPORT:
		return uhid_hid_set_report(hid, reportnum, buf, len, u_rtype);
	default:
		return -EIO;
	}
}

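/*
 * UHID_OUTPUT is fire-and-forget: the event is queued for user-space and the
 * call returns @count without waiting for a reply.
 */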
static int uhid_hid_output_raw(struct hid_device *hid, __u8 *buf, size_t count,
			       unsigned char report_type)
{
	struct uhid_device *uhid = hid->driver_data;
	__u8 rtype;
	unsigned long flags;
	struct uhid_event *ev;

	switch (report_type) {
	case HID_FEATURE_REPORT:
		rtype = UHID_FEATURE_REPORT;
		break;
	case HID_OUTPUT_REPORT:
		rtype = UHID_OUTPUT_REPORT;
		break;
	default:
		return -EINVAL;
	}

	if (count < 1 || count > UHID_DATA_MAX)
		return -EINVAL;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_OUTPUT;
	ev->u.output.size = count;
	ev->u.output.rtype = rtype;
	memcpy(ev->u.output.data, buf, count);

	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return count;
}

static int uhid_hid_output_report(struct hid_device *hid, __u8 *buf,
				  size_t count)
{
	return uhid_hid_output_raw(hid, buf, count, HID_OUTPUT_REPORT);
}

struct hid_ll_driver uhid_hid_driver = {
	.start = uhid_hid_start,
	.stop = uhid_hid_stop,
	.open = uhid_hid_open,
	.close = uhid_hid_close,
	.parse = uhid_hid_parse,
	.raw_request = uhid_hid_raw_request,
	.output_report = uhid_hid_output_report,
};
EXPORT_SYMBOL_GPL(uhid_hid_driver);

#ifdef CONFIG_COMPAT

/* Apparently we haven't stepped on these rakes enough times yet. */
struct uhid_create_req_compat {
	__u8 name[128];
	__u8 phys[64];
	__u8 uniq[64];

	compat_uptr_t rd_data;
	__u16 rd_size;

	__u16 bus;
	__u32 vendor;
	__u32 product;
	__u32 version;
	__u32 country;
} __attribute__((__packed__));

static int uhid_event_from_user(const char __user *buffer, size_t len,
				struct uhid_event *event)
{
	if (in_compat_syscall()) {
		u32 type;

		if (get_user(type, buffer))
			return -EFAULT;

		if (type == UHID_CREATE) {
			/*
			 * This is our messed up request with compat pointer.
			 * It is largish (more than 256 bytes) so we better
			 * allocate it from the heap.
			 */
			struct uhid_create_req_compat *compat;

			compat = kzalloc(sizeof(*compat), GFP_KERNEL);
			if (!compat)
				return -ENOMEM;

			buffer += sizeof(type);
			len -= sizeof(type);
			if (copy_from_user(compat, buffer,
					   min(len, sizeof(*compat)))) {
				kfree(compat);
				return -EFAULT;
			}

			/* Shuffle the data over to proper structure */
			event->type = type;

			memcpy(event->u.create.name, compat->name,
				sizeof(compat->name));
			memcpy(event->u.create.phys, compat->phys,
				sizeof(compat->phys));
			memcpy(event->u.create.uniq, compat->uniq,
				sizeof(compat->uniq));

			event->u.create.rd_data = compat_ptr(compat->rd_data);
			event->u.create.rd_size = compat->rd_size;

			event->u.create.bus = compat->bus;
			event->u.create.vendor = compat->vendor;
			event->u.create.product = compat->product;
			event->u.create.version = compat->version;
			event->u.create.country = compat->country;

			kfree(compat);
			return 0;
		}
		/* All others can be copied directly */
	}

	if (copy_from_user(event, buffer, min(len, sizeof(*event))))
		return -EFAULT;

	return 0;
}
#else
static int uhid_event_from_user(const char __user *buffer, size_t len,
				struct uhid_event *event)
{
	if (copy_from_user(event, buffer, min(len, sizeof(*event))))
		return -EFAULT;

	return 0;
}
#endif

static int uhid_dev_create2(struct uhid_device *uhid,
			    const struct uhid_event *ev)
{
	struct hid_device *hid;
	size_t rd_size, len;
	void *rd_data;
	int ret;

	if (uhid->running)
		return -EALREADY;

	rd_size = ev->u.create2.rd_size;
	if (rd_size <= 0 || rd_size > HID_MAX_DESCRIPTOR_SIZE)
		return -EINVAL;

	rd_data = kmemdup(ev->u.create2.rd_data, rd_size, GFP_KERNEL);
	if (!rd_data)
		return -ENOMEM;

	uhid->rd_size = rd_size;
	uhid->rd_data = rd_data;

	hid = hid_allocate_device();
	if (IS_ERR(hid)) {
		ret = PTR_ERR(hid);
		goto err_free;
	}

	len = min(sizeof(hid->name), sizeof(ev->u.create2.name)) - 1;
	strncpy(hid->name, ev->u.create2.name, len);
	len = min(sizeof(hid->phys), sizeof(ev->u.create2.phys)) - 1;
	strncpy(hid->phys, ev->u.create2.phys, len);
	len = min(sizeof(hid->uniq), sizeof(ev->u.create2.uniq)) - 1;
	strncpy(hid->uniq, ev->u.create2.uniq, len);

	hid->ll_driver = &uhid_hid_driver;
	hid->bus = ev->u.create2.bus;
	hid->vendor = ev->u.create2.vendor;
	hid->product = ev->u.create2.product;
	hid->version = ev->u.create2.version;
	hid->country = ev->u.create2.country;
	hid->driver_data = uhid;
	hid->dev.parent = uhid_misc.this_device;

	uhid->hid = hid;
	uhid->running = true;

	/*
	 * Adding of the HID device is done through a worker, so that HID
	 * drivers which issue feature requests during .probe can work;
	 * otherwise they would block on devlock, which is held by
	 * uhid_char_write().
	 */
	schedule_work(&uhid->worker);

	return 0;

err_free:
	kfree(uhid->rd_data);
	uhid->rd_data = NULL;
	uhid->rd_size = 0;
	return ret;
}

static int uhid_dev_create(struct uhid_device *uhid,
			   struct uhid_event *ev)
{
	struct uhid_create_req orig;

	orig = ev->u.create;

	if (orig.rd_size <= 0 || orig.rd_size > HID_MAX_DESCRIPTOR_SIZE)
		return -EINVAL;
	if (copy_from_user(&ev->u.create2.rd_data, orig.rd_data, orig.rd_size))
		return -EFAULT;

	memcpy(ev->u.create2.name, orig.name, sizeof(orig.name));
	memcpy(ev->u.create2.phys, orig.phys, sizeof(orig.phys));
	memcpy(ev->u.create2.uniq, orig.uniq, sizeof(orig.uniq));
	ev->u.create2.rd_size = orig.rd_size;
	ev->u.create2.bus = orig.bus;
	ev->u.create2.vendor = orig.vendor;
	ev->u.create2.product = orig.product;
	ev->u.create2.version = orig.version;
	ev->u.create2.country = orig.country;

	return uhid_dev_create2(uhid, ev);
}

static int uhid_dev_destroy(struct uhid_device *uhid)
{
	if (!uhid->running)
		return -EINVAL;

	uhid->running = false;
	wake_up_interruptible(&uhid->report_wait);

	cancel_work_sync(&uhid->worker);

	hid_destroy_device(uhid->hid);
	kfree(uhid->rd_data);

	return 0;
}

static int uhid_dev_input(struct uhid_device *uhid, struct uhid_event *ev)
{
	if (!uhid->running)
		return -EINVAL;

	hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input.data,
			 min_t(size_t, ev->u.input.size, UHID_DATA_MAX), 0);

	return 0;
}

static int uhid_dev_input2(struct uhid_device *uhid, struct uhid_event *ev)
{
	if (!uhid->running)
		return -EINVAL;

	hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input2.data,
			 min_t(size_t, ev->u.input2.size, UHID_DATA_MAX), 0);

	return 0;
}

static int uhid_dev_get_report_reply(struct uhid_device *uhid,
				     struct uhid_event *ev)
{
	if (!uhid->running)
		return -EINVAL;

	uhid_report_wake_up(uhid, ev->u.get_report_reply.id, ev);
	return 0;
}

static int uhid_dev_set_report_reply(struct uhid_device *uhid,
				     struct uhid_event *ev)
{
	if (!uhid->running)
		return -EINVAL;

	uhid_report_wake_up(uhid, ev->u.set_report_reply.id, ev);
	return 0;
}

static int uhid_char_open(struct inode *inode, struct file *file)
{
	struct uhid_device *uhid;

	uhid = kzalloc(sizeof(*uhid), GFP_KERNEL);
	if (!uhid)
		return -ENOMEM;

	mutex_init(&uhid->devlock);
	mutex_init(&uhid->report_lock);
	spin_lock_init(&uhid->qlock);
	init_waitqueue_head(&uhid->waitq);
	init_waitqueue_head(&uhid->report_wait);
	uhid->running = false;
	INIT_WORK(&uhid->worker, uhid_device_add_worker);

	file->private_data = uhid;
	nonseekable_open(inode, file);

	return 0;
}

static int uhid_char_release(struct inode *inode, struct file *file)
{
	struct uhid_device *uhid = file->private_data;
	unsigned int i;

	uhid_dev_destroy(uhid);

	for (i = 0; i < UHID_BUFSIZE; ++i)
		kfree(uhid->outq[i]);

	kfree(uhid);

	return 0;
}

static ssize_t uhid_char_read(struct file *file, char __user *buffer,
			      size_t count, loff_t *ppos)
{
	struct uhid_device *uhid = file->private_data;
	int ret;
	unsigned long flags;
	size_t len;

	/* they need at least the "type" member of uhid_event */
	if (count < sizeof(__u32))
		return -EINVAL;

try_again:
	if (file->f_flags & O_NONBLOCK) {
		if (uhid->head == uhid->tail)
			return -EAGAIN;
	} else {
		ret = wait_event_interruptible(uhid->waitq,
					       uhid->head != uhid->tail);
		if (ret)
			return ret;
	}

	ret = mutex_lock_interruptible(&uhid->devlock);
	if (ret)
		return ret;

	if (uhid->head == uhid->tail) {
		mutex_unlock(&uhid->devlock);
		goto try_again;
	} else {
		len = min(count, sizeof(**uhid->outq));
		if (copy_to_user(buffer, uhid->outq[uhid->tail], len)) {
			ret = -EFAULT;
		} else {
			kfree(uhid->outq[uhid->tail]);
			uhid->outq[uhid->tail] = NULL;

			spin_lock_irqsave(&uhid->qlock, flags);
			uhid->tail = (uhid->tail + 1) % UHID_BUFSIZE;
			spin_unlock_irqrestore(&uhid->qlock, flags);
		}
	}

	mutex_unlock(&uhid->devlock);
	return ret ? ret : len;
}

static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
			       size_t count, loff_t *ppos)
{
	struct uhid_device *uhid = file->private_data;
	int ret;
	size_t len;

	/* we need at least the "type" member of uhid_event */
	if (count < sizeof(__u32))
		return -EINVAL;

	ret = mutex_lock_interruptible(&uhid->devlock);
	if (ret)
		return ret;

	memset(&uhid->input_buf, 0, sizeof(uhid->input_buf));
	len = min(count, sizeof(uhid->input_buf));

	ret = uhid_event_from_user(buffer, len, &uhid->input_buf);
	if (ret)
		goto unlock;

	switch (uhid->input_buf.type) {
	case UHID_CREATE:
		ret = uhid_dev_create(uhid, &uhid->input_buf);
		break;
	case UHID_CREATE2:
		ret = uhid_dev_create2(uhid, &uhid->input_buf);
		break;
	case UHID_DESTROY:
		ret = uhid_dev_destroy(uhid);
		break;
	case UHID_INPUT:
		ret = uhid_dev_input(uhid, &uhid->input_buf);
		break;
	case UHID_INPUT2:
		ret = uhid_dev_input2(uhid, &uhid->input_buf);
		break;
	case UHID_GET_REPORT_REPLY:
		ret = uhid_dev_get_report_reply(uhid, &uhid->input_buf);
		break;
	case UHID_SET_REPORT_REPLY:
		ret = uhid_dev_set_report_reply(uhid, &uhid->input_buf);
		break;
	default:
		ret = -EOPNOTSUPP;
	}

unlock:
	mutex_unlock(&uhid->devlock);

	/* return "count" not "len" to not confuse the caller */
	return ret ? ret : count;
}
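/*
 * Roughly, user-space drives this interface by writing struct uhid_event to
 * /dev/uhid and reading events back. A minimal, illustrative sketch (not part
 * of this driver; error handling omitted, and the report descriptor rd[] with
 * its length rd_len is assumed to come from the caller):
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <linux/input.h>
 *	#include <linux/uhid.h>
 *
 *	int uhid_example(const unsigned char *rd, size_t rd_len)
 *	{
 *		struct uhid_event ev;
 *		int fd = open("/dev/uhid", O_RDWR | O_CLOEXEC);
 *
 *		memset(&ev, 0, sizeof(ev));
 *		ev.type = UHID_CREATE2;		// handled by uhid_dev_create2()
 *		strcpy((char *)ev.u.create2.name, "uhid-example");
 *		memcpy(ev.u.create2.rd_data, rd, rd_len);
 *		ev.u.create2.rd_size = rd_len;
 *		ev.u.create2.bus = BUS_USB;
 *		write(fd, &ev, sizeof(ev));
 *
 *		memset(&ev, 0, sizeof(ev));
 *		ev.type = UHID_INPUT2;		// handled by uhid_dev_input2()
 *		ev.u.input2.size = 1;
 *		ev.u.input2.data[0] = 0x01;	// one raw input report
 *		write(fd, &ev, sizeof(ev));
 *
 *		read(fd, &ev, sizeof(ev));	// e.g. UHID_START, UHID_OPEN
 *
 *		memset(&ev, 0, sizeof(ev));
 *		ev.type = UHID_DESTROY;		// or simply close(fd)
 *		write(fd, &ev, sizeof(ev));
 *		close(fd);
 *		return 0;
 *	}
 */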

static __poll_t uhid_char_poll(struct file *file, poll_table *wait)
{
	struct uhid_device *uhid = file->private_data;

	poll_wait(file, &uhid->waitq, wait);

	if (uhid->head != uhid->tail)
		return EPOLLIN | EPOLLRDNORM;

	return 0;
}

static const struct file_operations uhid_fops = {
	.owner		= THIS_MODULE,
	.open		= uhid_char_open,
	.release	= uhid_char_release,
	.read		= uhid_char_read,
	.write		= uhid_char_write,
	.poll		= uhid_char_poll,
	.llseek		= no_llseek,
};

static struct miscdevice uhid_misc = {
	.fops		= &uhid_fops,
	.minor		= UHID_MINOR,
	.name		= UHID_NAME,
};
module_misc_device(uhid_misc);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem");
MODULE_ALIAS_MISCDEV(UHID_MINOR);
MODULE_ALIAS("devname:" UHID_NAME);