blob: d78483a504c9f4921d70765a6ccf000d9eefc979 [file] [log] [blame]
David Härdeman4924a312014-04-03 20:34:28 -03001/* rc-ir-raw.c - handle IR pulse/space events
Mauro Carvalho Chehaba3572c32010-03-20 20:59:44 -03002 *
Mauro Carvalho Chehab37e59f82014-02-07 08:03:07 -02003 * Copyright (C) 2010 by Mauro Carvalho Chehab
Mauro Carvalho Chehaba3572c32010-03-20 20:59:44 -03004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation version 2 of the License.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
Paul Gortmaker35a24632011-08-01 15:26:38 -040015#include <linux/export.h>
Maxim Levitsky0d2cb1d2010-07-31 11:59:17 -030016#include <linux/kthread.h>
Maxim Levitsky45a568f2010-07-31 11:59:16 -030017#include <linux/mutex.h>
Stephen Rothwelldff65de2011-07-29 15:34:32 +100018#include <linux/kmod.h>
David Härdeman724e2492010-04-08 13:10:00 -030019#include <linux/sched.h>
Mauro Carvalho Chehabf62de672010-11-09 23:09:57 -030020#include "rc-core-priv.h"
Mauro Carvalho Chehaba3572c32010-03-20 20:59:44 -030021
David Härdemanc2163692010-06-13 17:29:36 -030022/* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
23static LIST_HEAD(ir_raw_client_list);
24
Mauro Carvalho Chehab995187b2010-03-24 20:47:53 -030025/* Used to handle IR raw handler extensions */
Maxim Levitsky45a568f2010-07-31 11:59:16 -030026static DEFINE_MUTEX(ir_raw_handler_lock);
David Härdeman667c9eb2010-06-13 17:29:31 -030027static LIST_HEAD(ir_raw_handler_list);
Heiner Kallweit37e90a22016-09-27 16:48:47 -030028static atomic64_t available_protocols = ATOMIC64_INIT(0);
Mauro Carvalho Chehab93c312f2010-03-25 21:13:43 -030029
/*
 * ir_raw_event_thread() - per-device kthread feeding queued IR events to decoders
 * @data: the device's struct ir_raw_event_ctrl (passed as void *)
 *
 * Drains the device kfifo and hands every event to each registered protocol
 * handler, then sleeps until woken by ir_raw_event_handle() or stopped.
 */
static int ir_raw_event_thread(void *data)
{
	struct ir_raw_event ev;
	struct ir_raw_handler *handler;
	struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;

	while (1) {
		mutex_lock(&ir_raw_handler_lock);
		while (kfifo_out(&raw->kfifo, &ev, 1)) {
			/*
			 * Handlers advertising no protocols (protocols == 0)
			 * are always called, regardless of what the device
			 * has enabled.
			 */
			list_for_each_entry(handler, &ir_raw_handler_list, list)
				if (raw->dev->enabled_protocols &
				    handler->protocols || !handler->protocols)
					handler->decode(raw->dev, ev);
			raw->prev_ev = ev;
		}
		mutex_unlock(&ir_raw_handler_lock);

		/*
		 * Go interruptible *before* re-checking the stop flag and the
		 * fifo, so a wake_up_process() racing with either check is not
		 * lost: we would be set back to TASK_RUNNING and schedule()
		 * would return immediately.
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		} else if (!kfifo_is_empty(&raw->kfifo))
			set_current_state(TASK_RUNNING);

		schedule();
	}

	return 0;
}
60
David Härdeman724e2492010-04-08 13:10:00 -030061/**
62 * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
David Härdemand8b4b582010-10-29 16:08:23 -030063 * @dev: the struct rc_dev device descriptor
David Härdemane40b1122010-04-15 18:46:00 -030064 * @ev: the struct ir_raw_event descriptor of the pulse/space
David Härdeman724e2492010-04-08 13:10:00 -030065 *
66 * This routine (which may be called from an interrupt context) stores a
67 * pulse/space duration for the raw ir decoding state machines. Pulses are
68 * signalled as positive values and spaces as negative values. A zero value
69 * will reset the decoding state machines.
70 */
David Härdemand8b4b582010-10-29 16:08:23 -030071int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
Mauro Carvalho Chehaba3572c32010-03-20 20:59:44 -030072{
David Härdemand8b4b582010-10-29 16:08:23 -030073 if (!dev->raw)
Mauro Carvalho Chehaba3572c32010-03-20 20:59:44 -030074 return -EINVAL;
75
Mauro Carvalho Chehab74c47922010-10-20 11:56:50 -030076 IR_dprintk(2, "sample: (%05dus %s)\n",
David Härdemand8b4b582010-10-29 16:08:23 -030077 TO_US(ev->duration), TO_STR(ev->pulse));
Maxim Levitsky510fcb72010-07-31 11:59:15 -030078
Heiner Kallweit464254e2015-11-27 20:02:38 -020079 if (!kfifo_put(&dev->raw->kfifo, *ev)) {
80 dev_err(&dev->dev, "IR event FIFO is full!\n");
81 return -ENOSPC;
82 }
Mauro Carvalho Chehaba3572c32010-03-20 20:59:44 -030083
David Härdeman724e2492010-04-08 13:10:00 -030084 return 0;
Mauro Carvalho Chehaba3572c32010-03-20 20:59:44 -030085}
86EXPORT_SYMBOL_GPL(ir_raw_event_store);
87
/**
 * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
 * @dev:	the struct rc_dev device descriptor
 * @pulse:	true for pulse, false for space
 *
 * This routine (which may be called from an interrupt context) is used to
 * store the beginning of an ir pulse or space (or the start/end of ir
 * reception) for the raw ir decoding state machines. This is used by
 * hardware which does not provide durations directly but only interrupts
 * (or similar events) on state change.
 *
 * Returns the result of queueing the event (see ir_raw_event_store()).
 */
int ir_raw_event_store_edge(struct rc_dev *dev, bool pulse)
{
	ktime_t now;
	DEFINE_IR_RAW_EVENT(ev);
	int rc = 0;

	if (!dev->raw)
		return -EINVAL;

	now = ktime_get();
	/*
	 * The stored event describes the interval that just *ended*: an edge
	 * starting a pulse means the elapsed time since the previous edge was
	 * a space, hence the inversion of @pulse.
	 */
	ev.duration = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
	ev.pulse = !pulse;

	rc = ir_raw_event_store(dev, &ev);

	dev->raw->last_event = now;

	/* timer could be set to timeout (125ms by default) */
	/*
	 * (Re)arm the edge timer to fire ~15ms from now so a missing final
	 * edge still produces a timeout event (see edge_handle()), unless it
	 * is already pending and due to fire sooner than that.
	 */
	if (!timer_pending(&dev->raw->edge_handle) ||
	    time_after(dev->raw->edge_handle.expires,
		       jiffies + msecs_to_jiffies(15))) {
		mod_timer(&dev->raw->edge_handle,
			  jiffies + msecs_to_jiffies(15));
	}

	return rc;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
127
128/**
Maxim Levitsky4a702eb2010-07-31 11:59:22 -0300129 * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
David Härdemand8b4b582010-10-29 16:08:23 -0300130 * @dev: the struct rc_dev device descriptor
Mauro Carvalho Chehabc4365922017-11-27 10:19:38 -0500131 * @ev: the event that has occurred
Maxim Levitsky4a702eb2010-07-31 11:59:22 -0300132 *
133 * This routine (which may be called from an interrupt context) works
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300134 * in similar manner to ir_raw_event_store_edge.
Maxim Levitsky4a702eb2010-07-31 11:59:22 -0300135 * This routine is intended for devices with limited internal buffer
Sean Youngb83bfd12012-08-13 08:59:47 -0300136 * It automerges samples of same type, and handles timeouts. Returns non-zero
137 * if the event was added, and zero if the event was ignored due to idle
138 * processing.
Maxim Levitsky4a702eb2010-07-31 11:59:22 -0300139 */
David Härdemand8b4b582010-10-29 16:08:23 -0300140int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
Maxim Levitsky4a702eb2010-07-31 11:59:22 -0300141{
David Härdemand8b4b582010-10-29 16:08:23 -0300142 if (!dev->raw)
Maxim Levitsky4a702eb2010-07-31 11:59:22 -0300143 return -EINVAL;
144
145 /* Ignore spaces in idle mode */
David Härdemand8b4b582010-10-29 16:08:23 -0300146 if (dev->idle && !ev->pulse)
Maxim Levitsky4a702eb2010-07-31 11:59:22 -0300147 return 0;
David Härdemand8b4b582010-10-29 16:08:23 -0300148 else if (dev->idle)
149 ir_raw_event_set_idle(dev, false);
Maxim Levitsky4a702eb2010-07-31 11:59:22 -0300150
David Härdemand8b4b582010-10-29 16:08:23 -0300151 if (!dev->raw->this_ev.duration)
152 dev->raw->this_ev = *ev;
153 else if (ev->pulse == dev->raw->this_ev.pulse)
154 dev->raw->this_ev.duration += ev->duration;
155 else {
156 ir_raw_event_store(dev, &dev->raw->this_ev);
157 dev->raw->this_ev = *ev;
Maxim Levitsky4a702eb2010-07-31 11:59:22 -0300158 }
159
160 /* Enter idle mode if nessesary */
David Härdemand8b4b582010-10-29 16:08:23 -0300161 if (!ev->pulse && dev->timeout &&
162 dev->raw->this_ev.duration >= dev->timeout)
163 ir_raw_event_set_idle(dev, true);
164
Sean Youngb83bfd12012-08-13 08:59:47 -0300165 return 1;
Maxim Levitsky4a702eb2010-07-31 11:59:22 -0300166}
167EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
168
Maxim Levitsky46519182010-10-16 19:56:28 -0300169/**
David Härdemand8b4b582010-10-29 16:08:23 -0300170 * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not
171 * @dev: the struct rc_dev device descriptor
172 * @idle: whether the device is idle or not
Maxim Levitsky46519182010-10-16 19:56:28 -0300173 */
David Härdemand8b4b582010-10-29 16:08:23 -0300174void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
Maxim Levitsky4a702eb2010-07-31 11:59:22 -0300175{
David Härdemand8b4b582010-10-29 16:08:23 -0300176 if (!dev->raw)
Maxim Levitsky4a702eb2010-07-31 11:59:22 -0300177 return;
178
Maxim Levitsky46519182010-10-16 19:56:28 -0300179 IR_dprintk(2, "%s idle mode\n", idle ? "enter" : "leave");
Maxim Levitsky4a702eb2010-07-31 11:59:22 -0300180
181 if (idle) {
David Härdemand8b4b582010-10-29 16:08:23 -0300182 dev->raw->this_ev.timeout = true;
183 ir_raw_event_store(dev, &dev->raw->this_ev);
184 init_ir_raw_event(&dev->raw->this_ev);
Maxim Levitsky4a702eb2010-07-31 11:59:22 -0300185 }
Maxim Levitsky46519182010-10-16 19:56:28 -0300186
David Härdemand8b4b582010-10-29 16:08:23 -0300187 if (dev->s_idle)
188 dev->s_idle(dev, idle);
189
190 dev->idle = idle;
Maxim Levitsky4a702eb2010-07-31 11:59:22 -0300191}
192EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
193
194/**
David Härdeman724e2492010-04-08 13:10:00 -0300195 * ir_raw_event_handle() - schedules the decoding of stored ir data
David Härdemand8b4b582010-10-29 16:08:23 -0300196 * @dev: the struct rc_dev device descriptor
David Härdeman724e2492010-04-08 13:10:00 -0300197 *
David Härdemand8b4b582010-10-29 16:08:23 -0300198 * This routine will tell rc-core to start decoding stored ir data.
David Härdeman724e2492010-04-08 13:10:00 -0300199 */
David Härdemand8b4b582010-10-29 16:08:23 -0300200void ir_raw_event_handle(struct rc_dev *dev)
David Härdeman724e2492010-04-08 13:10:00 -0300201{
Sean Young963761a2017-05-24 06:24:51 -0300202 if (!dev->raw || !dev->raw->thread)
David Härdeman724e2492010-04-08 13:10:00 -0300203 return;
204
David Härdemand8b4b582010-10-29 16:08:23 -0300205 wake_up_process(dev->raw->thread);
David Härdeman724e2492010-04-08 13:10:00 -0300206}
Mauro Carvalho Chehaba3572c32010-03-20 20:59:44 -0300207EXPORT_SYMBOL_GPL(ir_raw_event_handle);
Mauro Carvalho Chehab995187b2010-03-24 20:47:53 -0300208
David Härdeman667c9eb2010-06-13 17:29:31 -0300209/* used internally by the sysfs interface */
210u64
Randy Dunlap2dbd61b2011-01-09 00:53:53 -0300211ir_raw_get_allowed_protocols(void)
David Härdeman667c9eb2010-06-13 17:29:31 -0300212{
Heiner Kallweit37e90a22016-09-27 16:48:47 -0300213 return atomic64_read(&available_protocols);
David Härdeman667c9eb2010-06-13 17:29:31 -0300214}
215
/*
 * Default change_protocol for raw devices: decoding happens in software, so
 * any protocol selection is acceptable and no hardware setup is required.
 */
static int change_protocol(struct rc_dev *dev, u64 *rc_proto)
{
	/* the caller will update dev->enabled_protocols */
	return 0;
}
221
/* Clear @protocols from dev->enabled_protocols, serialized by dev->lock. */
static void ir_raw_disable_protocols(struct rc_dev *dev, u64 protocols)
{
	mutex_lock(&dev->lock);
	dev->enabled_protocols &= ~protocols;
	mutex_unlock(&dev->lock);
}
228
James Hogan3875233d2015-03-31 14:48:06 -0300229/**
Antti Seppälä844a4f42015-03-31 14:48:07 -0300230 * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
231 * @ev: Pointer to pointer to next free event. *@ev is incremented for
232 * each raw event filled.
233 * @max: Maximum number of raw events to fill.
234 * @timings: Manchester modulation timings.
235 * @n: Number of bits of data.
236 * @data: Data bits to encode.
237 *
238 * Encodes the @n least significant bits of @data using Manchester (bi-phase)
239 * modulation with the timing characteristics described by @timings, writing up
240 * to @max raw IR events using the *@ev pointer.
241 *
242 * Returns: 0 on success.
243 * -ENOBUFS if there isn't enough space in the array to fit the
244 * full encoded data. In this case all @max events will have been
245 * written.
246 */
247int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
248 const struct ir_raw_timings_manchester *timings,
Sean Youngb73bc162017-02-11 20:33:38 -0200249 unsigned int n, u64 data)
Antti Seppälä844a4f42015-03-31 14:48:07 -0300250{
251 bool need_pulse;
Sean Youngb73bc162017-02-11 20:33:38 -0200252 u64 i;
Antti Seppälä844a4f42015-03-31 14:48:07 -0300253 int ret = -ENOBUFS;
254
Sean Youngb73bc162017-02-11 20:33:38 -0200255 i = BIT_ULL(n - 1);
Antti Seppälä844a4f42015-03-31 14:48:07 -0300256
257 if (timings->leader) {
258 if (!max--)
259 return ret;
260 if (timings->pulse_space_start) {
261 init_ir_raw_event_duration((*ev)++, 1, timings->leader);
262
263 if (!max--)
264 return ret;
265 init_ir_raw_event_duration((*ev), 0, timings->leader);
266 } else {
267 init_ir_raw_event_duration((*ev), 1, timings->leader);
268 }
269 i >>= 1;
270 } else {
271 /* continue existing signal */
272 --(*ev);
273 }
274 /* from here on *ev will point to the last event rather than the next */
275
276 while (n && i > 0) {
277 need_pulse = !(data & i);
278 if (timings->invert)
279 need_pulse = !need_pulse;
280 if (need_pulse == !!(*ev)->pulse) {
281 (*ev)->duration += timings->clock;
282 } else {
283 if (!max--)
284 goto nobufs;
285 init_ir_raw_event_duration(++(*ev), need_pulse,
286 timings->clock);
287 }
288
289 if (!max--)
290 goto nobufs;
291 init_ir_raw_event_duration(++(*ev), !need_pulse,
292 timings->clock);
293 i >>= 1;
294 }
295
296 if (timings->trailer_space) {
297 if (!(*ev)->pulse)
298 (*ev)->duration += timings->trailer_space;
299 else if (!max--)
300 goto nobufs;
301 else
302 init_ir_raw_event_duration(++(*ev), 0,
303 timings->trailer_space);
304 }
305
306 ret = 0;
307nobufs:
308 /* point to the next event rather than last event before returning */
309 ++(*ev);
310 return ret;
311}
312EXPORT_SYMBOL(ir_raw_gen_manchester);
313
314/**
James Hogancaec0982014-03-14 20:04:12 -0300315 * ir_raw_gen_pd() - Encode data to raw events with pulse-distance modulation.
316 * @ev: Pointer to pointer to next free event. *@ev is incremented for
317 * each raw event filled.
318 * @max: Maximum number of raw events to fill.
319 * @timings: Pulse distance modulation timings.
320 * @n: Number of bits of data.
321 * @data: Data bits to encode.
322 *
323 * Encodes the @n least significant bits of @data using pulse-distance
324 * modulation with the timing characteristics described by @timings, writing up
325 * to @max raw IR events using the *@ev pointer.
326 *
327 * Returns: 0 on success.
328 * -ENOBUFS if there isn't enough space in the array to fit the
329 * full encoded data. In this case all @max events will have been
330 * written.
331 */
332int ir_raw_gen_pd(struct ir_raw_event **ev, unsigned int max,
333 const struct ir_raw_timings_pd *timings,
334 unsigned int n, u64 data)
335{
336 int i;
337 int ret;
338 unsigned int space;
339
340 if (timings->header_pulse) {
341 ret = ir_raw_gen_pulse_space(ev, &max, timings->header_pulse,
342 timings->header_space);
343 if (ret)
344 return ret;
345 }
346
347 if (timings->msb_first) {
348 for (i = n - 1; i >= 0; --i) {
349 space = timings->bit_space[(data >> i) & 1];
350 ret = ir_raw_gen_pulse_space(ev, &max,
351 timings->bit_pulse,
352 space);
353 if (ret)
354 return ret;
355 }
356 } else {
357 for (i = 0; i < n; ++i, data >>= 1) {
358 space = timings->bit_space[data & 1];
359 ret = ir_raw_gen_pulse_space(ev, &max,
360 timings->bit_pulse,
361 space);
362 if (ret)
363 return ret;
364 }
365 }
366
367 ret = ir_raw_gen_pulse_space(ev, &max, timings->trailer_pulse,
368 timings->trailer_space);
369 return ret;
370}
371EXPORT_SYMBOL(ir_raw_gen_pd);
372
/**
 * ir_raw_gen_pl() - Encode data to raw events with pulse-length modulation.
 * @ev:		Pointer to pointer to next free event. *@ev is incremented for
 *		each raw event filled.
 * @max:	Maximum number of raw events to fill.
 * @timings:	Pulse length modulation timings.
 * @n:		Number of bits of data.
 * @data:	Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using pulse-length
 * modulation (the bit value selects the pulse duration) with the timing
 * characteristics described by @timings, writing up to @max raw IR events
 * using the *@ev pointer.
 *
 * Returns:	0 on success.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		full encoded data. In this case all @max events will have been
 *		written.
 */
int ir_raw_gen_pl(struct ir_raw_event **ev, unsigned int max,
		  const struct ir_raw_timings_pl *timings,
		  unsigned int n, u64 data)
{
	int i;
	int ret = -ENOBUFS;
	unsigned int pulse;

	if (!max--)
		return ret;

	init_ir_raw_event_duration((*ev)++, 1, timings->header_pulse);

	if (timings->msb_first) {
		for (i = n - 1; i >= 0; --i) {
			/* fixed-length space separating consecutive bits */
			if (!max--)
				return ret;
			init_ir_raw_event_duration((*ev)++, 0,
						   timings->bit_space);
			if (!max--)
				return ret;
			/* the bit value selects the pulse length */
			pulse = timings->bit_pulse[(data >> i) & 1];
			init_ir_raw_event_duration((*ev)++, 1, pulse);
		}
	} else {
		for (i = 0; i < n; ++i, data >>= 1) {
			/* fixed-length space separating consecutive bits */
			if (!max--)
				return ret;
			init_ir_raw_event_duration((*ev)++, 0,
						   timings->bit_space);
			if (!max--)
				return ret;
			/* the bit value selects the pulse length */
			pulse = timings->bit_pulse[data & 1];
			init_ir_raw_event_duration((*ev)++, 1, pulse);
		}
	}

	if (!max--)
		return ret;

	init_ir_raw_event_duration((*ev)++, 0, timings->trailer_space);

	return 0;
}
EXPORT_SYMBOL(ir_raw_gen_pl);
436
437/**
James Hogan3875233d2015-03-31 14:48:06 -0300438 * ir_raw_encode_scancode() - Encode a scancode as raw events
439 *
440 * @protocol: protocol
441 * @scancode: scancode filter describing a single scancode
442 * @events: array of raw events to write into
443 * @max: max number of raw events
444 *
445 * Attempts to encode the scancode as raw events.
446 *
447 * Returns: The number of events written.
448 * -ENOBUFS if there isn't enough space in the array to fit the
449 * encoding. In this case all @max events will have been written.
450 * -EINVAL if the scancode is ambiguous or invalid, or if no
451 * compatible encoder was found.
452 */
Sean Young6d741bf2017-08-07 16:20:58 -0400453int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode,
James Hogan3875233d2015-03-31 14:48:06 -0300454 struct ir_raw_event *events, unsigned int max)
455{
456 struct ir_raw_handler *handler;
457 int ret = -EINVAL;
458 u64 mask = 1ULL << protocol;
459
460 mutex_lock(&ir_raw_handler_lock);
461 list_for_each_entry(handler, &ir_raw_handler_list, list) {
462 if (handler->protocols & mask && handler->encode) {
463 ret = handler->encode(protocol, scancode, events, max);
464 if (ret >= 0 || ret == -ENOBUFS)
465 break;
466 }
467 }
468 mutex_unlock(&ir_raw_handler_lock);
469
470 return ret;
471}
472EXPORT_SYMBOL(ir_raw_encode_scancode);
473
/*
 * edge_handle() - timer callback for edge-only receivers
 * @t: the timer embedded in struct ir_raw_event_ctrl
 *
 * Armed by ir_raw_event_store_edge(). If no edge has arrived for at least
 * dev->timeout ns, synthesise a timeout event so the decoders can complete
 * the keypress; otherwise re-arm for the remainder of the timeout window.
 */
static void edge_handle(struct timer_list *t)
{
	struct ir_raw_event_ctrl *raw = from_timer(raw, t, edge_handle);
	struct rc_dev *dev = raw->dev;
	ktime_t interval = ktime_sub(ktime_get(), dev->raw->last_event);

	if (ktime_to_ns(interval) >= dev->timeout) {
		DEFINE_IR_RAW_EVENT(ev);

		ev.timeout = true;
		ev.duration = ktime_to_ns(interval);

		ir_raw_event_store(dev, &ev);
	} else {
		/* an edge arrived recently; sleep out the rest of the timeout */
		mod_timer(&dev->raw->edge_handle,
			  jiffies + nsecs_to_jiffies(dev->timeout -
						     ktime_to_ns(interval)));
	}

	/* wake the decoder thread in either case */
	ir_raw_event_handle(dev);
}
495
/*
 * Used to (un)register raw event clients
 */

/*
 * ir_raw_event_prepare() - allocate and initialise a device's raw state
 * @dev: the struct rc_dev device descriptor
 *
 * Allocates dev->raw, installs the default (no-op) change_protocol handler,
 * sets up the edge timer and the event kfifo. On first ever use it also
 * requests the ir-lirc-codec module.
 *
 * Returns 0 on success, -EINVAL for a NULL @dev, -ENOMEM if allocation fails.
 */
int ir_raw_event_prepare(struct rc_dev *dev)
{
	static bool raw_init; /* 'false' default value, raw decoders loaded? */

	if (!dev)
		return -EINVAL;

	/* one-time module request, done when the first raw device appears */
	if (!raw_init) {
		request_module("ir-lirc-codec");
		raw_init = true;
	}

	dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
	if (!dev->raw)
		return -ENOMEM;

	dev->raw->dev = dev;
	dev->change_protocol = change_protocol;
	timer_setup(&dev->raw->edge_handle, edge_handle, 0);
	INIT_KFIFO(dev->raw->kfifo);

	return 0;
}
522
/*
 * ir_raw_event_register() - start decoding for a prepared raw client
 * @dev: the struct rc_dev device descriptor
 *
 * Starts the per-device decoder kthread (except for pure transmitters) and
 * introduces the new client to every registered raw handler.
 *
 * Returns 0 on success or the kthread_run() error code.
 */
int ir_raw_event_register(struct rc_dev *dev)
{
	struct ir_raw_handler *handler;
	struct task_struct *thread;

	/*
	 * raw transmitters do not need any event registration
	 * because the event is coming from userspace
	 */
	if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
		thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u",
				     dev->minor);

		if (IS_ERR(thread))
			return PTR_ERR(thread);

		dev->raw->thread = thread;
	}

	/* publish the client and let each handler set up per-device state */
	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&dev->raw->list, &ir_raw_client_list);
	list_for_each_entry(handler, &ir_raw_handler_list, list)
		if (handler->raw_register)
			handler->raw_register(dev);
	mutex_unlock(&ir_raw_handler_lock);

	return 0;
}
David Härdemand8b4b582010-10-29 16:08:23 -0300551
David Härdemanf56928a2017-05-03 07:04:00 -0300552void ir_raw_event_free(struct rc_dev *dev)
553{
554 if (!dev)
555 return;
556
David Härdemand8b4b582010-10-29 16:08:23 -0300557 kfree(dev->raw);
558 dev->raw = NULL;
David Härdeman667c9eb2010-06-13 17:29:31 -0300559}
560
David Härdemand8b4b582010-10-29 16:08:23 -0300561void ir_raw_event_unregister(struct rc_dev *dev)
David Härdeman667c9eb2010-06-13 17:29:31 -0300562{
David Härdemanc2163692010-06-13 17:29:36 -0300563 struct ir_raw_handler *handler;
David Härdeman667c9eb2010-06-13 17:29:31 -0300564
David Härdemand8b4b582010-10-29 16:08:23 -0300565 if (!dev || !dev->raw)
David Härdeman667c9eb2010-06-13 17:29:31 -0300566 return;
567
David Härdemand8b4b582010-10-29 16:08:23 -0300568 kthread_stop(dev->raw->thread);
Sean Younge5e26432017-08-06 15:25:52 -0400569 del_timer_sync(&dev->raw->edge_handle);
David Härdemanc2163692010-06-13 17:29:36 -0300570
Maxim Levitsky45a568f2010-07-31 11:59:16 -0300571 mutex_lock(&ir_raw_handler_lock);
David Härdemand8b4b582010-10-29 16:08:23 -0300572 list_del(&dev->raw->list);
David Härdemanc2163692010-06-13 17:29:36 -0300573 list_for_each_entry(handler, &ir_raw_handler_list, list)
574 if (handler->raw_unregister)
David Härdemand8b4b582010-10-29 16:08:23 -0300575 handler->raw_unregister(dev);
Maxim Levitsky45a568f2010-07-31 11:59:16 -0300576 mutex_unlock(&ir_raw_handler_lock);
David Härdeman667c9eb2010-06-13 17:29:31 -0300577
David Härdemanf56928a2017-05-03 07:04:00 -0300578 ir_raw_event_free(dev);
David Härdeman667c9eb2010-06-13 17:29:31 -0300579}
580
Mauro Carvalho Chehab995187b2010-03-24 20:47:53 -0300581/*
582 * Extension interface - used to register the IR decoders
583 */
584
/*
 * ir_raw_handler_register() - register a protocol decoder/encoder
 * @ir_raw_handler: the handler to add
 *
 * Adds the handler to the global list, introduces it to every existing raw
 * client, and publishes its protocols in the available_protocols mask.
 * Always returns 0.
 */
int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
{
	struct ir_raw_event_ctrl *raw;

	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
	/* let the new handler set up per-device state for existing clients */
	if (ir_raw_handler->raw_register)
		list_for_each_entry(raw, &ir_raw_client_list, list)
			ir_raw_handler->raw_register(raw->dev);
	atomic64_or(ir_raw_handler->protocols, &available_protocols);
	mutex_unlock(&ir_raw_handler_lock);

	return 0;
}
EXPORT_SYMBOL(ir_raw_handler_register);
600
/*
 * ir_raw_handler_unregister() - remove a protocol decoder/encoder
 * @ir_raw_handler: the handler to remove
 *
 * Removes the handler from the global list, disables its protocols on and
 * detaches it from every raw client, and withdraws its protocols from the
 * available_protocols mask.
 */
void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
{
	struct ir_raw_event_ctrl *raw;
	u64 protocols = ir_raw_handler->protocols;

	mutex_lock(&ir_raw_handler_lock);
	list_del(&ir_raw_handler->list);
	list_for_each_entry(raw, &ir_raw_client_list, list) {
		/* stop clients from decoding protocols this handler provided */
		ir_raw_disable_protocols(raw->dev, protocols);
		if (ir_raw_handler->raw_unregister)
			ir_raw_handler->raw_unregister(raw->dev);
	}
	atomic64_andnot(protocols, &available_protocols);
	mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);