blob: 8fe3efcb832715c4a978625387dc0bb0ea5b00a2 [file] [log] [blame]
Thomas Gleixner2874c5f2019-05-27 08:55:01 +02001// SPDX-License-Identifier: GPL-2.0-or-later
David Herrmann1ccd7a22012-06-10 15:16:13 +02002/*
3 * User-space I/O driver support for HID subsystem
4 * Copyright (c) 2012 David Herrmann
5 */
6
7/*
David Herrmann1ccd7a22012-06-10 15:16:13 +02008 */
9
10#include <linux/atomic.h>
Dmitry Torokhovbefde022013-02-18 11:26:11 +010011#include <linux/compat.h>
Eric Biggers8c01db72018-11-14 13:55:09 -080012#include <linux/cred.h>
David Herrmann1ccd7a22012-06-10 15:16:13 +020013#include <linux/device.h>
14#include <linux/fs.h>
15#include <linux/hid.h>
16#include <linux/input.h>
17#include <linux/miscdevice.h>
18#include <linux/module.h>
19#include <linux/mutex.h>
20#include <linux/poll.h>
21#include <linux/sched.h>
22#include <linux/spinlock.h>
23#include <linux/uhid.h>
24#include <linux/wait.h>
25
26#define UHID_NAME "uhid"
David Herrmannace3d862012-06-10 15:16:14 +020027#define UHID_BUFSIZE 32
28
/*
 * Per-open state for one emulated HID device. Created in uhid_char_open()
 * and freed in uhid_char_release(); one char-dev fd maps to at most one
 * HID device.
 */
struct uhid_device {
	struct mutex devlock;		/* serializes char-dev I/O and device lifetime */
	bool running;			/* true while a HID device is registered */

	__u8 *rd_data;			/* private copy of the report descriptor */
	uint rd_size;			/* length of rd_data in bytes */

	struct hid_device *hid;		/* HID core device; valid while running */
	struct uhid_event input_buf;	/* scratch buffer for events written by user-space */

	wait_queue_head_t waitq;	/* readers sleep here until outq is non-empty */
	spinlock_t qlock;		/* protects outq head and report_* state */
	__u8 head;			/* producer index into outq */
	__u8 tail;			/* consumer index into outq */
	struct uhid_event *outq[UHID_BUFSIZE];	/* ring of events pending delivery */

	/* blocking GET_REPORT support; state changes protected by qlock */
	struct mutex report_lock;	/* at most one report request in flight */
	wait_queue_head_t report_wait;	/* requester waits here for the reply */
	bool report_running;		/* true while a request awaits its reply */
	u32 report_id;			/* id matching replies to the current request */
	u32 report_type;		/* expected reply event type */
	struct uhid_event report_buf;	/* reply payload copied here for the requester */
	struct work_struct worker;	/* deferred hid_add_device() (see uhid_dev_create2) */
};
David Herrmann1ccd7a22012-06-10 15:16:13 +020054
55static struct miscdevice uhid_misc;
56
/*
 * Worker that registers the freshly created HID device with HID core.
 * Runs outside of devlock so HID drivers may issue blocking report
 * requests from their probe path without deadlocking against
 * uhid_char_write(), which holds devlock.
 */
static void uhid_device_add_worker(struct work_struct *work)
{
	struct uhid_device *uhid = container_of(work, struct uhid_device, worker);
	int ret;

	ret = hid_add_device(uhid->hid);
	if (ret) {
		hid_err(uhid->hid, "Cannot register HID device: error %d\n", ret);

		/* registration failed: tear the device down again */
		hid_destroy_device(uhid->hid);
		uhid->hid = NULL;
		uhid->running = false;
	}
}
71
/*
 * Push @ev onto the output ring and wake any reader.
 * Must be called with qlock held. Always takes ownership of @ev: if the
 * ring is full the event is dropped (with a warning) and freed.
 */
static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev)
{
	__u8 newhead;

	newhead = (uhid->head + 1) % UHID_BUFSIZE;

	if (newhead != uhid->tail) {
		uhid->outq[uhid->head] = ev;
		uhid->head = newhead;
		wake_up_interruptible(&uhid->waitq);
	} else {
		hid_warn(uhid->hid, "Output queue is full\n");
		kfree(ev);
	}
}
87
88static int uhid_queue_event(struct uhid_device *uhid, __u32 event)
89{
90 unsigned long flags;
91 struct uhid_event *ev;
92
93 ev = kzalloc(sizeof(*ev), GFP_KERNEL);
94 if (!ev)
95 return -ENOMEM;
96
97 ev->type = event;
98
99 spin_lock_irqsave(&uhid->qlock, flags);
100 uhid_queue(uhid, ev);
101 spin_unlock_irqrestore(&uhid->qlock, flags);
102
103 return 0;
104}
105
/*
 * hid_ll_driver ->start() callback: announce UHID_START to user-space and
 * report which report types use numbered reports, so user-space knows
 * whether payloads carry a leading report-ID byte.
 * Returns 0 on success or -ENOMEM if the event cannot be allocated.
 */
static int uhid_hid_start(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;
	struct uhid_event *ev;
	unsigned long flags;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_START;

	if (hid->report_enum[HID_FEATURE_REPORT].numbered)
		ev->u.start.dev_flags |= UHID_DEV_NUMBERED_FEATURE_REPORTS;
	if (hid->report_enum[HID_OUTPUT_REPORT].numbered)
		ev->u.start.dev_flags |= UHID_DEV_NUMBERED_OUTPUT_REPORTS;
	if (hid->report_enum[HID_INPUT_REPORT].numbered)
		ev->u.start.dev_flags |= UHID_DEV_NUMBERED_INPUT_REPORTS;

	/* uhid_queue() takes ownership of @ev */
	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return 0;
}
131
/*
 * hid_ll_driver ->stop() callback: drop all driver claims on the device
 * and notify user-space with UHID_STOP.
 */
static void uhid_hid_stop(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	hid->claimed = 0;
	uhid_queue_event(uhid, UHID_STOP);
}
139
140static int uhid_hid_open(struct hid_device *hid)
141{
David Herrmanne7191472012-06-10 15:16:22 +0200142 struct uhid_device *uhid = hid->driver_data;
143
144 return uhid_queue_event(uhid, UHID_OPEN);
David Herrmannd365c6c2012-06-10 15:16:18 +0200145}
146
147static void uhid_hid_close(struct hid_device *hid)
148{
David Herrmanne7191472012-06-10 15:16:22 +0200149 struct uhid_device *uhid = hid->driver_data;
150
151 uhid_queue_event(uhid, UHID_CLOSE);
David Herrmannd365c6c2012-06-10 15:16:18 +0200152}
153
David Herrmannd365c6c2012-06-10 15:16:18 +0200154static int uhid_hid_parse(struct hid_device *hid)
155{
David Herrmann037c0612012-06-10 15:16:20 +0200156 struct uhid_device *uhid = hid->driver_data;
157
158 return hid_parse_report(hid, uhid->rd_data, uhid->rd_size);
David Herrmannd365c6c2012-06-10 15:16:18 +0200159}
160
/* must be called with report_lock held */
static int __uhid_report_queue_and_wait(struct uhid_device *uhid,
					struct uhid_event *ev,
					__u32 *report_id)
{
	unsigned long flags;
	int ret;

	/*
	 * Assign a fresh id to this request and record which reply type we
	 * expect. "ev->type + 1" relies on UHID_*_REPORT_REPLY immediately
	 * following UHID_*_REPORT in the uhid event enum. uhid_queue() takes
	 * ownership of @ev.
	 */
	spin_lock_irqsave(&uhid->qlock, flags);
	*report_id = ++uhid->report_id;
	uhid->report_type = ev->type + 1;
	uhid->report_running = true;
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	/* wait up to 5s for user-space to answer or the device to go away */
	ret = wait_event_interruptible_timeout(uhid->report_wait,
				!uhid->report_running || !uhid->running,
				5 * HZ);
	if (!ret || !uhid->running || uhid->report_running)
		ret = -EIO;		/* timeout, teardown, or no reply */
	else if (ret < 0)
		ret = -ERESTARTSYS;	/* interrupted by a signal */
	else
		ret = 0;		/* success: reply is in uhid->report_buf */

	uhid->report_running = false;

	return ret;
}
190
/*
 * Deliver a GET/SET_REPORT reply from user-space to the blocked requester.
 * Replies whose type or id do not match the outstanding request (e.g.
 * answers to requests that already timed out) are discarded.
 */
static void uhid_report_wake_up(struct uhid_device *uhid, u32 id,
				const struct uhid_event *ev)
{
	unsigned long flags;

	spin_lock_irqsave(&uhid->qlock, flags);

	/* id for old report; drop it silently */
	if (uhid->report_type != ev->type || uhid->report_id != id)
		goto unlock;
	if (!uhid->report_running)
		goto unlock;

	/* copy the reply for the waiter, then wake it */
	memcpy(&uhid->report_buf, ev, sizeof(*ev));
	uhid->report_running = false;
	wake_up_interruptible(&uhid->report_wait);

unlock:
	spin_unlock_irqrestore(&uhid->qlock, flags);
}
211
/*
 * Synchronous GET_REPORT: queue a UHID_GET_REPORT request to user-space
 * and block (under report_lock) until the reply arrives or times out.
 * On success copies at most min(count, UHID_DATA_MAX, reply size) bytes
 * into @buf and returns that length; otherwise returns a negative errno.
 */
static int uhid_hid_get_report(struct hid_device *hid, unsigned char rnum,
			       u8 *buf, size_t count, u8 rtype)
{
	struct uhid_device *uhid = hid->driver_data;
	struct uhid_get_report_reply_req *req;
	struct uhid_event *ev;
	int ret;

	if (!uhid->running)
		return -EIO;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_GET_REPORT;
	ev->u.get_report.rnum = rnum;
	ev->u.get_report.rtype = rtype;

	/* only one report transaction may be in flight at a time */
	ret = mutex_lock_interruptible(&uhid->report_lock);
	if (ret) {
		kfree(ev);
		return ret;
	}

	/* this _always_ takes ownership of @ev */
	ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.get_report.id);
	if (ret)
		goto unlock;

	req = &uhid->report_buf.u.get_report_reply;
	if (req->err) {
		ret = -EIO;
	} else {
		/* clamp to caller buffer, reply size and protocol maximum */
		ret = min3(count, (size_t)req->size, (size_t)UHID_DATA_MAX);
		memcpy(buf, req->data, ret);
	}

unlock:
	mutex_unlock(&uhid->report_lock);
	return ret;
}
254
/*
 * Synchronous SET_REPORT: send a UHID_SET_REPORT request carrying @buf to
 * user-space and block (under report_lock) for the acknowledgement.
 * Returns @count on success or a negative errno on failure/timeout.
 */
static int uhid_hid_set_report(struct hid_device *hid, unsigned char rnum,
			       const u8 *buf, size_t count, u8 rtype)
{
	struct uhid_device *uhid = hid->driver_data;
	struct uhid_event *ev;
	int ret;

	if (!uhid->running || count > UHID_DATA_MAX)
		return -EIO;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_SET_REPORT;
	ev->u.set_report.rnum = rnum;
	ev->u.set_report.rtype = rtype;
	ev->u.set_report.size = count;
	memcpy(ev->u.set_report.data, buf, count);

	/* only one report transaction may be in flight at a time */
	ret = mutex_lock_interruptible(&uhid->report_lock);
	if (ret) {
		kfree(ev);
		return ret;
	}

	/* this _always_ takes ownership of @ev */
	ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.set_report.id);
	if (ret)
		goto unlock;

	if (uhid->report_buf.u.set_report_reply.err)
		ret = -EIO;
	else
		ret = count;

unlock:
	mutex_unlock(&uhid->report_lock);
	return ret;
}
295
David Herrmann7c4003b2014-07-29 17:14:23 +0200296static int uhid_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
297 __u8 *buf, size_t len, unsigned char rtype,
298 int reqtype)
299{
David Herrmann11c22152014-07-29 17:14:24 +0200300 u8 u_rtype;
301
302 switch (rtype) {
303 case HID_FEATURE_REPORT:
304 u_rtype = UHID_FEATURE_REPORT;
305 break;
306 case HID_OUTPUT_REPORT:
307 u_rtype = UHID_OUTPUT_REPORT;
308 break;
309 case HID_INPUT_REPORT:
310 u_rtype = UHID_INPUT_REPORT;
311 break;
312 default:
313 return -EINVAL;
314 }
315
David Herrmann7c4003b2014-07-29 17:14:23 +0200316 switch (reqtype) {
317 case HID_REQ_GET_REPORT:
David Herrmann11c22152014-07-29 17:14:24 +0200318 return uhid_hid_get_report(hid, reportnum, buf, len, u_rtype);
David Herrmann7c4003b2014-07-29 17:14:23 +0200319 case HID_REQ_SET_REPORT:
David Herrmann11c22152014-07-29 17:14:24 +0200320 return uhid_hid_set_report(hid, reportnum, buf, len, u_rtype);
David Herrmann7c4003b2014-07-29 17:14:23 +0200321 default:
322 return -EIO;
323 }
324}
325
/*
 * Send a raw output/feature report to user-space as a UHID_OUTPUT event.
 * Fire-and-forget: no reply is awaited. Returns @count on success or a
 * negative errno.
 */
static int uhid_hid_output_raw(struct hid_device *hid, __u8 *buf, size_t count,
			       unsigned char report_type)
{
	struct uhid_device *uhid = hid->driver_data;
	__u8 rtype;
	unsigned long flags;
	struct uhid_event *ev;

	/* map HID core report types onto the UHID wire encoding */
	switch (report_type) {
	case HID_FEATURE_REPORT:
		rtype = UHID_FEATURE_REPORT;
		break;
	case HID_OUTPUT_REPORT:
		rtype = UHID_OUTPUT_REPORT;
		break;
	default:
		return -EINVAL;
	}

	if (count < 1 || count > UHID_DATA_MAX)
		return -EINVAL;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_OUTPUT;
	ev->u.output.size = count;
	ev->u.output.rtype = rtype;
	memcpy(ev->u.output.data, buf, count);

	/* uhid_queue() takes ownership of @ev */
	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return count;
}
363
/* hid_ll_driver ->output_report(): output reports reuse the raw path. */
static int uhid_hid_output_report(struct hid_device *hid, __u8 *buf,
				  size_t count)
{
	return uhid_hid_output_raw(hid, buf, count, HID_OUTPUT_REPORT);
}
369
/*
 * Low-level transport glue handed to HID core for every uhid device.
 * Exported so other modules can identify uhid-backed hid_devices.
 */
struct hid_ll_driver uhid_hid_driver = {
	.start = uhid_hid_start,
	.stop = uhid_hid_stop,
	.open = uhid_hid_open,
	.close = uhid_hid_close,
	.parse = uhid_hid_parse,
	.raw_request = uhid_hid_raw_request,
	.output_report = uhid_hid_output_report,
};
EXPORT_SYMBOL_GPL(uhid_hid_driver);
David Herrmannd365c6c2012-06-10 15:16:18 +0200380
#ifdef CONFIG_COMPAT

/* Apparently we haven't stepped on these rakes enough times yet. */
/*
 * 32-bit layout of the legacy UHID_CREATE request: identical to
 * struct uhid_create_req except that the report-descriptor pointer is a
 * 32-bit compat_uptr_t, which shifts every following field.
 */
struct uhid_create_req_compat {
	__u8 name[128];
	__u8 phys[64];
	__u8 uniq[64];

	compat_uptr_t rd_data;
	__u16 rd_size;

	__u16 bus;
	__u32 vendor;
	__u32 product;
	__u32 version;
	__u32 country;
} __attribute__((__packed__));

/*
 * Copy a uhid_event from user-space into @event, translating the 32-bit
 * UHID_CREATE layout when called from a compat syscall. All other event
 * types have identical layouts and are copied verbatim.
 * Returns 0 on success, -EFAULT/-ENOMEM on failure.
 */
static int uhid_event_from_user(const char __user *buffer, size_t len,
				struct uhid_event *event)
{
	if (in_compat_syscall()) {
		u32 type;

		/* peek at the type field to detect UHID_CREATE */
		if (get_user(type, buffer))
			return -EFAULT;

		if (type == UHID_CREATE) {
			/*
			 * This is our messed up request with compat pointer.
			 * It is largish (more than 256 bytes) so we better
			 * allocate it from the heap.
			 */
			struct uhid_create_req_compat *compat;

			compat = kzalloc(sizeof(*compat), GFP_KERNEL);
			if (!compat)
				return -ENOMEM;

			buffer += sizeof(type);
			len -= sizeof(type);
			if (copy_from_user(compat, buffer,
					   min(len, sizeof(*compat)))) {
				kfree(compat);
				return -EFAULT;
			}

			/* Shuffle the data over to proper structure */
			event->type = type;

			memcpy(event->u.create.name, compat->name,
				sizeof(compat->name));
			memcpy(event->u.create.phys, compat->phys,
				sizeof(compat->phys));
			memcpy(event->u.create.uniq, compat->uniq,
				sizeof(compat->uniq));

			event->u.create.rd_data = compat_ptr(compat->rd_data);
			event->u.create.rd_size = compat->rd_size;

			event->u.create.bus = compat->bus;
			event->u.create.vendor = compat->vendor;
			event->u.create.product = compat->product;
			event->u.create.version = compat->version;
			event->u.create.country = compat->country;

			kfree(compat);
			return 0;
		}
		/* All others can be copied directly */
	}

	if (copy_from_user(event, buffer, min(len, sizeof(*event))))
		return -EFAULT;

	return 0;
}
#else
/* Non-compat kernels: all event layouts match; copy verbatim. */
static int uhid_event_from_user(const char __user *buffer, size_t len,
				struct uhid_event *event)
{
	if (copy_from_user(event, buffer, min(len, sizeof(*event))))
		return -EFAULT;

	return 0;
}
#endif
468
/*
 * Handle UHID_CREATE2: duplicate the embedded report descriptor, allocate
 * a hid_device, copy identification strings/ids from the request and
 * schedule asynchronous registration with HID core.
 * Returns 0 on success, -EALREADY if a device already exists for this fd,
 * -EINVAL for a bad descriptor size, or -ENOMEM/PTR_ERR on allocation
 * failure. Called with devlock held (from uhid_char_write()).
 */
static int uhid_dev_create2(struct uhid_device *uhid,
			    const struct uhid_event *ev)
{
	struct hid_device *hid;
	size_t rd_size, len;
	void *rd_data;
	int ret;

	if (uhid->running)
		return -EALREADY;

	/* rd_size is unsigned, so "<= 0" only rejects an empty descriptor */
	rd_size = ev->u.create2.rd_size;
	if (rd_size <= 0 || rd_size > HID_MAX_DESCRIPTOR_SIZE)
		return -EINVAL;

	rd_data = kmemdup(ev->u.create2.rd_data, rd_size, GFP_KERNEL);
	if (!rd_data)
		return -ENOMEM;

	uhid->rd_size = rd_size;
	uhid->rd_data = rd_data;

	hid = hid_allocate_device();
	if (IS_ERR(hid)) {
		ret = PTR_ERR(hid);
		goto err_free;
	}

	/* @hid is zero-initialized, strncpy() is correct, strlcpy() not */
	len = min(sizeof(hid->name), sizeof(ev->u.create2.name)) - 1;
	strncpy(hid->name, ev->u.create2.name, len);
	len = min(sizeof(hid->phys), sizeof(ev->u.create2.phys)) - 1;
	strncpy(hid->phys, ev->u.create2.phys, len);
	len = min(sizeof(hid->uniq), sizeof(ev->u.create2.uniq)) - 1;
	strncpy(hid->uniq, ev->u.create2.uniq, len);

	hid->ll_driver = &uhid_hid_driver;
	hid->bus = ev->u.create2.bus;
	hid->vendor = ev->u.create2.vendor;
	hid->product = ev->u.create2.product;
	hid->version = ev->u.create2.version;
	hid->country = ev->u.create2.country;
	hid->driver_data = uhid;
	hid->dev.parent = uhid_misc.this_device;

	uhid->hid = hid;
	uhid->running = true;

	/* Adding of a HID device is done through a worker, to allow HID drivers
	 * which use feature requests during .probe to work, without they would
	 * be blocked on devlock, which is held by uhid_char_write.
	 */
	schedule_work(&uhid->worker);

	return 0;

err_free:
	kfree(uhid->rd_data);
	uhid->rd_data = NULL;
	uhid->rd_size = 0;
	return ret;
}
531
/*
 * Handle legacy UHID_CREATE: the request carries a user-space pointer to
 * the report descriptor, so copy the descriptor into the kernel and
 * repack the request into the UHID_CREATE2 layout (which embeds the
 * descriptor inline), then defer to uhid_dev_create2(). The union in
 * @ev is rewritten in place; that is safe because all create2 fields are
 * filled from the saved copy @orig.
 */
static int uhid_dev_create(struct uhid_device *uhid,
			   struct uhid_event *ev)
{
	struct uhid_create_req orig;

	orig = ev->u.create;

	if (orig.rd_size <= 0 || orig.rd_size > HID_MAX_DESCRIPTOR_SIZE)
		return -EINVAL;
	if (copy_from_user(&ev->u.create2.rd_data, orig.rd_data, orig.rd_size))
		return -EFAULT;

	memcpy(ev->u.create2.name, orig.name, sizeof(orig.name));
	memcpy(ev->u.create2.phys, orig.phys, sizeof(orig.phys));
	memcpy(ev->u.create2.uniq, orig.uniq, sizeof(orig.uniq));
	ev->u.create2.rd_size = orig.rd_size;
	ev->u.create2.bus = orig.bus;
	ev->u.create2.vendor = orig.vendor;
	ev->u.create2.product = orig.product;
	ev->u.create2.version = orig.version;
	ev->u.create2.country = orig.country;

	return uhid_dev_create2(uhid, ev);
}
556
/*
 * Handle UHID_DESTROY: mark the device as stopped, wake any requester
 * blocked in __uhid_report_queue_and_wait(), wait for the deferred
 * registration worker to finish, then unregister the HID device and free
 * the descriptor copy. Returns -EINVAL if no device is running.
 */
static int uhid_dev_destroy(struct uhid_device *uhid)
{
	if (!uhid->running)
		return -EINVAL;

	uhid->running = false;
	wake_up_interruptible(&uhid->report_wait);

	/* ensure uhid_device_add_worker() is not still touching uhid->hid */
	cancel_work_sync(&uhid->worker);

	hid_destroy_device(uhid->hid);
	kfree(uhid->rd_data);

	return 0;
}
572
David Herrmann5e87a362012-06-10 15:16:19 +0200573static int uhid_dev_input(struct uhid_device *uhid, struct uhid_event *ev)
574{
575 if (!uhid->running)
576 return -EINVAL;
577
578 hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input.data,
579 min_t(size_t, ev->u.input.size, UHID_DATA_MAX), 0);
580
581 return 0;
582}
583
Petri Gynther45226432014-03-24 13:50:01 -0700584static int uhid_dev_input2(struct uhid_device *uhid, struct uhid_event *ev)
585{
586 if (!uhid->running)
587 return -EINVAL;
588
589 hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input2.data,
590 min_t(size_t, ev->u.input2.size, UHID_DATA_MAX), 0);
591
592 return 0;
593}
594
David Herrmannfa71f322014-07-29 17:14:21 +0200595static int uhid_dev_get_report_reply(struct uhid_device *uhid,
596 struct uhid_event *ev)
David Herrmannfcfcf0d2012-06-10 15:16:25 +0200597{
David Herrmannfcfcf0d2012-06-10 15:16:25 +0200598 if (!uhid->running)
599 return -EINVAL;
600
David Herrmann11c22152014-07-29 17:14:24 +0200601 uhid_report_wake_up(uhid, ev->u.get_report_reply.id, ev);
602 return 0;
603}
David Herrmannfcfcf0d2012-06-10 15:16:25 +0200604
David Herrmann11c22152014-07-29 17:14:24 +0200605static int uhid_dev_set_report_reply(struct uhid_device *uhid,
606 struct uhid_event *ev)
607{
608 if (!uhid->running)
609 return -EINVAL;
David Herrmannfcfcf0d2012-06-10 15:16:25 +0200610
David Herrmann11c22152014-07-29 17:14:24 +0200611 uhid_report_wake_up(uhid, ev->u.set_report_reply.id, ev);
David Herrmannfcfcf0d2012-06-10 15:16:25 +0200612 return 0;
613}
614
/*
 * Allocate and initialize per-open state. Each open file corresponds to
 * at most one emulated HID device, created later via UHID_CREATE(2).
 */
static int uhid_char_open(struct inode *inode, struct file *file)
{
	struct uhid_device *uhid;

	uhid = kzalloc(sizeof(*uhid), GFP_KERNEL);
	if (!uhid)
		return -ENOMEM;

	mutex_init(&uhid->devlock);
	mutex_init(&uhid->report_lock);
	spin_lock_init(&uhid->qlock);
	init_waitqueue_head(&uhid->waitq);
	init_waitqueue_head(&uhid->report_wait);
	uhid->running = false;
	INIT_WORK(&uhid->worker, uhid_device_add_worker);

	file->private_data = uhid;
	/* uhid is a message stream; reads/writes carry no file position */
	stream_open(inode, file);

	return 0;
}
636
/*
 * Tear down the device (if one was created) and free every event still
 * pending in the output ring, then release the per-open state.
 */
static int uhid_char_release(struct inode *inode, struct file *file)
{
	struct uhid_device *uhid = file->private_data;
	unsigned int i;

	uhid_dev_destroy(uhid);

	for (i = 0; i < UHID_BUFSIZE; ++i)
		kfree(uhid->outq[i]);

	kfree(uhid);

	return 0;
}
651
/*
 * Read one queued event. Blocks (unless O_NONBLOCK) until the output ring
 * is non-empty. The emptiness check before taking devlock is done
 * locklessly, so after acquiring devlock the state is re-checked and we
 * loop if another reader raced us and drained the queue. devlock
 * serializes consumers; qlock is only needed for the tail update that the
 * producer side observes.
 */
static ssize_t uhid_char_read(struct file *file, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct uhid_device *uhid = file->private_data;
	int ret;
	unsigned long flags;
	size_t len;

	/* they need at least the "type" member of uhid_event */
	if (count < sizeof(__u32))
		return -EINVAL;

try_again:
	if (file->f_flags & O_NONBLOCK) {
		if (uhid->head == uhid->tail)
			return -EAGAIN;
	} else {
		ret = wait_event_interruptible(uhid->waitq,
						uhid->head != uhid->tail);
		if (ret)
			return ret;
	}

	ret = mutex_lock_interruptible(&uhid->devlock);
	if (ret)
		return ret;

	if (uhid->head == uhid->tail) {
		/* another reader consumed the event; wait again */
		mutex_unlock(&uhid->devlock);
		goto try_again;
	} else {
		/* deliver at most one event; short reads truncate it */
		len = min(count, sizeof(**uhid->outq));
		if (copy_to_user(buffer, uhid->outq[uhid->tail], len)) {
			ret = -EFAULT;
		} else {
			kfree(uhid->outq[uhid->tail]);
			uhid->outq[uhid->tail] = NULL;

			spin_lock_irqsave(&uhid->qlock, flags);
			uhid->tail = (uhid->tail + 1) % UHID_BUFSIZE;
			spin_unlock_irqrestore(&uhid->qlock, flags);
		}
	}

	mutex_unlock(&uhid->devlock);
	return ret ? ret : len;
}
699
/*
 * Write one request from user-space: copy it into uhid->input_buf (with
 * compat translation) and dispatch on the event type. Holds devlock for
 * the whole operation, serializing against concurrent reads/writes.
 * Returns @count on success so callers are not confused by the (possibly
 * shorter) amount actually consumed.
 */
static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct uhid_device *uhid = file->private_data;
	int ret;
	size_t len;

	/* we need at least the "type" member of uhid_event */
	if (count < sizeof(__u32))
		return -EINVAL;

	ret = mutex_lock_interruptible(&uhid->devlock);
	if (ret)
		return ret;

	memset(&uhid->input_buf, 0, sizeof(uhid->input_buf));
	len = min(count, sizeof(uhid->input_buf));

	ret = uhid_event_from_user(buffer, len, &uhid->input_buf);
	if (ret)
		goto unlock;

	switch (uhid->input_buf.type) {
	case UHID_CREATE:
		/*
		 * 'struct uhid_create_req' contains a __user pointer which is
		 * copied from, so it's unsafe to allow this with elevated
		 * privileges (e.g. from a setuid binary) or via kernel_write().
		 */
		if (file->f_cred != current_cred() || uaccess_kernel()) {
			pr_err_once("UHID_CREATE from different security context by process %d (%s), this is not allowed.\n",
				    task_tgid_vnr(current), current->comm);
			ret = -EACCES;
			goto unlock;
		}
		ret = uhid_dev_create(uhid, &uhid->input_buf);
		break;
	case UHID_CREATE2:
		ret = uhid_dev_create2(uhid, &uhid->input_buf);
		break;
	case UHID_DESTROY:
		ret = uhid_dev_destroy(uhid);
		break;
	case UHID_INPUT:
		ret = uhid_dev_input(uhid, &uhid->input_buf);
		break;
	case UHID_INPUT2:
		ret = uhid_dev_input2(uhid, &uhid->input_buf);
		break;
	case UHID_GET_REPORT_REPLY:
		ret = uhid_dev_get_report_reply(uhid, &uhid->input_buf);
		break;
	case UHID_SET_REPORT_REPLY:
		ret = uhid_dev_set_report_reply(uhid, &uhid->input_buf);
		break;
	default:
		ret = -EOPNOTSUPP;
	}

unlock:
	mutex_unlock(&uhid->devlock);

	/* return "count" not "len" to not confuse the caller */
	return ret ? ret : count;
}
765
/*
 * poll()/select() support: writable at all times, readable whenever the
 * output ring holds at least one event.
 */
static __poll_t uhid_char_poll(struct file *file, poll_table *wait)
{
	struct uhid_device *uhid = file->private_data;
	__poll_t mask = EPOLLOUT | EPOLLWRNORM; /* uhid is always writable */

	poll_wait(file, &uhid->waitq, wait);

	if (uhid->head != uhid->tail)
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}
778
/* Character-device entry points for /dev/uhid. */
static const struct file_operations uhid_fops = {
	.owner		= THIS_MODULE,
	.open		= uhid_char_open,
	.release	= uhid_char_release,
	.read		= uhid_char_read,
	.write		= uhid_char_write,
	.poll		= uhid_char_poll,
	.llseek		= no_llseek,	/* stream device; seeking is meaningless */
};

/* Misc device with a fixed minor so udev can create /dev/uhid reliably. */
static struct miscdevice uhid_misc = {
	.fops		= &uhid_fops,
	.minor		= UHID_MINOR,
	.name		= UHID_NAME,
};
module_misc_device(uhid_misc);
David Herrmann1ccd7a22012-06-10 15:16:13 +0200795
David Herrmann1ccd7a22012-06-10 15:16:13 +0200796MODULE_LICENSE("GPL");
797MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
798MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem");
David Herrmann19872d22013-09-09 18:33:54 +0200799MODULE_ALIAS_MISCDEV(UHID_MINOR);
Marcel Holtmann60cbd532013-09-01 11:02:46 -0700800MODULE_ALIAS("devname:" UHID_NAME);