// SPDX-License-Identifier: GPL-2.0
// rc-ir-raw.c - handle IR pulse/space events
//
// Copyright (C) 2010 by Mauro Carvalho Chehab

#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include "rc-core-priv.h"

/* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
static LIST_HEAD(ir_raw_client_list);

/* Used to handle IR raw handler extensions */
DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
static atomic64_t available_protocols = ATOMIC64_INIT(0);

static int ir_raw_event_thread(void *data)
{
	struct ir_raw_event ev;
	struct ir_raw_handler *handler;
	struct ir_raw_event_ctrl *raw = data;
	struct rc_dev *dev = raw->dev;

	while (1) {
		mutex_lock(&ir_raw_handler_lock);
		while (kfifo_out(&raw->kfifo, &ev, 1)) {
			if (is_timing_event(ev)) {
				if (ev.duration == 0)
					dev_warn_once(&dev->dev, "nonsensical timing event of duration 0");
				if (is_timing_event(raw->prev_ev) &&
				    !is_transition(&ev, &raw->prev_ev))
					dev_warn_once(&dev->dev, "two consecutive events of type %s",
						      TO_STR(ev.pulse));
				if (raw->prev_ev.reset && ev.pulse == 0)
					dev_warn_once(&dev->dev, "timing event after reset should be pulse");
			}
			list_for_each_entry(handler, &ir_raw_handler_list, list)
				if (dev->enabled_protocols &
				    handler->protocols || !handler->protocols)
					handler->decode(dev, ev);
			lirc_raw_event(dev, ev);
			raw->prev_ev = ev;
		}
		mutex_unlock(&ir_raw_handler_lock);

		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		} else if (!kfifo_is_empty(&raw->kfifo))
			set_current_state(TASK_RUNNING);

		schedule();
	}

	return 0;
}

/**
 * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines. Whether the
 * sample is a pulse or a space is given by @ev->pulse, its length in
 * microseconds by @ev->duration, and an event with the reset flag set will
 * reset the decoding state machines.
 */
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	dev_dbg(&dev->dev, "sample: (%05dus %s)\n",
		ev->duration, TO_STR(ev->pulse));

	if (!kfifo_put(&dev->raw->kfifo, *ev)) {
		dev_err(&dev->dev, "IR event FIFO is full!\n");
		return -ENOSPC;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store);
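
/*
 * Illustrative sketch (not part of the rc-core API): how a hypothetical
 * driver that measures complete pulse/space durations in its interrupt
 * handler could feed them to the raw decoders. The helper name and values
 * are made-up example assumptions.
 *
 *	static void example_push_sample(struct rc_dev *rcdev, bool pulse,
 *					u32 duration_us)
 *	{
 *		struct ir_raw_event ev = {
 *			.pulse = pulse,			// true: IR carrier present
 *			.duration = duration_us,	// microseconds
 *		};
 *
 *		ir_raw_event_store(rcdev, &ev);
 *		ir_raw_event_handle(rcdev);	// wake the decoder thread
 *	}
 */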

/**
 * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
 * @dev:	the struct rc_dev device descriptor
 * @pulse:	true for pulse, false for space
 *
 * This routine (which may be called from an interrupt context) is used to
 * store the beginning of an ir pulse or space (or the start/end of ir
 * reception) for the raw ir decoding state machines. This is used by
 * hardware which does not provide durations directly but only interrupts
 * (or similar events) on state change.
 */
int ir_raw_event_store_edge(struct rc_dev *dev, bool pulse)
{
	ktime_t now;
	struct ir_raw_event ev = {};

	if (!dev->raw)
		return -EINVAL;

	now = ktime_get();
	ev.duration = ktime_to_us(ktime_sub(now, dev->raw->last_event));
	ev.pulse = !pulse;

	return ir_raw_event_store_with_timeout(dev, &ev);
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
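
/*
 * Illustrative sketch (hypothetical names): a receiver that only reports
 * level changes, such as a GPIO interrupt, passes the level seen at each
 * edge and lets rc-core turn the time since the previous edge into a
 * pulse/space duration and arm the decode/timeout timer.
 *
 *	static irqreturn_t example_edge_irq(int irq, void *data)
 *	{
 *		struct rc_dev *rcdev = data;
 *		// level after this edge: true means the carrier (pulse)
 *		// is now present
 *		bool now_pulse = example_read_level();
 *
 *		ir_raw_event_store_edge(rcdev, now_pulse);
 *		return IRQ_HANDLED;
 *	}
 */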

/*
 * ir_raw_event_store_with_timeout() - pass a pulse/space duration to the raw
 *				       ir decoders, schedule decoding and
 *				       timeout
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines, schedules
 * decoding and generates a timeout.
 */
int ir_raw_event_store_with_timeout(struct rc_dev *dev, struct ir_raw_event *ev)
{
	ktime_t now;
	int rc = 0;

	if (!dev->raw)
		return -EINVAL;

	now = ktime_get();

	spin_lock(&dev->raw->edge_spinlock);
	rc = ir_raw_event_store(dev, ev);

	dev->raw->last_event = now;

	/* timer could be set to timeout (125ms by default) */
	if (!timer_pending(&dev->raw->edge_handle) ||
	    time_after(dev->raw->edge_handle.expires,
		       jiffies + msecs_to_jiffies(15))) {
		mod_timer(&dev->raw->edge_handle,
			  jiffies + msecs_to_jiffies(15));
	}
	spin_unlock(&dev->raw->edge_spinlock);

	return rc;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_timeout);

/**
 * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the event that has occurred
 *
 * This routine (which may be called from an interrupt context) works in a
 * similar manner to ir_raw_event_store_edge(). It is intended for devices
 * with a limited internal buffer: it automerges samples of the same type and
 * handles timeouts. Returns non-zero if the event was added, and zero if the
 * event was ignored due to idle processing.
 */
int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	/* Ignore spaces in idle mode */
	if (dev->idle && !ev->pulse)
		return 0;
	else if (dev->idle)
		ir_raw_event_set_idle(dev, false);

	if (!dev->raw->this_ev.duration)
		dev->raw->this_ev = *ev;
	else if (ev->pulse == dev->raw->this_ev.pulse)
		dev->raw->this_ev.duration += ev->duration;
	else {
		ir_raw_event_store(dev, &dev->raw->this_ev);
		dev->raw->this_ev = *ev;
	}

	/* Enter idle mode if necessary */
	if (!ev->pulse && dev->timeout &&
	    dev->raw->this_ev.duration >= dev->timeout)
		ir_raw_event_set_idle(dev, true);

	return 1;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
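
/*
 * Illustrative sketch (hypothetical helper): hardware that delivers several
 * raw samples per interrupt can push them through the filtered path;
 * consecutive samples of the same polarity are merged and the idle handling
 * above decides when the receiver has gone quiet.
 *
 *	static void example_push_chunk(struct rc_dev *rcdev,
 *				       struct ir_raw_event *samples,
 *				       unsigned int count)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < count; i++)
 *			ir_raw_event_store_with_filter(rcdev, &samples[i]);
 *
 *		ir_raw_event_handle(rcdev);
 *	}
 */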

/**
 * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not
 * @dev:	the struct rc_dev device descriptor
 * @idle:	whether the device is idle or not
 */
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
{
	if (!dev->raw)
		return;

	dev_dbg(&dev->dev, "%s idle mode\n", idle ? "enter" : "leave");

	if (idle) {
		dev->raw->this_ev.timeout = true;
		ir_raw_event_store(dev, &dev->raw->this_ev);
		dev->raw->this_ev = (struct ir_raw_event) {};
	}

	if (dev->s_idle)
		dev->s_idle(dev, idle);

	dev->idle = idle;
}
EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
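
/*
 * Illustrative sketch: a driver that gets an explicit "receiver went quiet"
 * indication from its hardware can report it directly, which flushes any
 * sample merged by ir_raw_event_store_with_filter() as a timeout event.
 * The interrupt status bit below is a made-up placeholder.
 *
 *	// inside the driver's interrupt handler
 *	if (status & EXAMPLE_IRQ_RX_IDLE)
 *		ir_raw_event_set_idle(rcdev, true);
 */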

/**
 * ir_raw_event_handle() - schedules the decoding of stored ir data
 * @dev:	the struct rc_dev device descriptor
 *
 * This routine will tell rc-core to start decoding stored ir data.
 */
void ir_raw_event_handle(struct rc_dev *dev)
{
	if (!dev->raw || !dev->raw->thread)
		return;

	wake_up_process(dev->raw->thread);
}
EXPORT_SYMBOL_GPL(ir_raw_event_handle);

/* used internally by the sysfs interface */
u64
ir_raw_get_allowed_protocols(void)
{
	return atomic64_read(&available_protocols);
}

static int change_protocol(struct rc_dev *dev, u64 *rc_proto)
{
	struct ir_raw_handler *handler;
	u32 timeout = 0;

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (!(dev->enabled_protocols & handler->protocols) &&
		    (*rc_proto & handler->protocols) && handler->raw_register)
			handler->raw_register(dev);

		if ((dev->enabled_protocols & handler->protocols) &&
		    !(*rc_proto & handler->protocols) &&
		    handler->raw_unregister)
			handler->raw_unregister(dev);
	}
	mutex_unlock(&ir_raw_handler_lock);

	if (!dev->max_timeout)
		return 0;

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (handler->protocols & *rc_proto) {
			if (timeout < handler->min_timeout)
				timeout = handler->min_timeout;
		}
	}
	mutex_unlock(&ir_raw_handler_lock);

	if (timeout == 0)
		timeout = IR_DEFAULT_TIMEOUT;
	else
		timeout += MS_TO_US(10);

	if (timeout < dev->min_timeout)
		timeout = dev->min_timeout;
	else if (timeout > dev->max_timeout)
		timeout = dev->max_timeout;

	if (dev->s_timeout)
		dev->s_timeout(dev, timeout);
	else
		dev->timeout = timeout;

	return 0;
}

static void ir_raw_disable_protocols(struct rc_dev *dev, u64 protocols)
{
	mutex_lock(&dev->lock);
	dev->enabled_protocols &= ~protocols;
	mutex_unlock(&dev->lock);
}

/**
 * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
 * @ev:		Pointer to pointer to next free event. *@ev is incremented for
 *		each raw event filled.
 * @max:	Maximum number of raw events to fill.
 * @timings:	Manchester modulation timings.
 * @n:		Number of bits of data.
 * @data:	Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using Manchester (bi-phase)
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns:	0 on success.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		full encoded data. In this case all @max events will have been
 *		written.
 */
int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
			  const struct ir_raw_timings_manchester *timings,
			  unsigned int n, u64 data)
{
	bool need_pulse;
	u64 i;
	int ret = -ENOBUFS;

	i = BIT_ULL(n - 1);

	if (timings->leader_pulse) {
		if (!max--)
			return ret;
		init_ir_raw_event_duration((*ev), 1, timings->leader_pulse);
		if (timings->leader_space) {
			if (!max--)
				return ret;
			init_ir_raw_event_duration(++(*ev), 0,
						   timings->leader_space);
		}
	} else {
		/* continue existing signal */
		--(*ev);
	}
	/* from here on *ev will point to the last event rather than the next */

	while (n && i > 0) {
		need_pulse = !(data & i);
		if (timings->invert)
			need_pulse = !need_pulse;
		if (need_pulse == !!(*ev)->pulse) {
			(*ev)->duration += timings->clock;
		} else {
			if (!max--)
				goto nobufs;
			init_ir_raw_event_duration(++(*ev), need_pulse,
						   timings->clock);
		}

		if (!max--)
			goto nobufs;
		init_ir_raw_event_duration(++(*ev), !need_pulse,
					   timings->clock);
		i >>= 1;
	}

	if (timings->trailer_space) {
		if (!(*ev)->pulse)
			(*ev)->duration += timings->trailer_space;
		else if (!max--)
			goto nobufs;
		else
			init_ir_raw_event_duration(++(*ev), 0,
						   timings->trailer_space);
	}

	ret = 0;
nobufs:
	/* point to the next event rather than last event before returning */
	++(*ev);
	return ret;
}
EXPORT_SYMBOL(ir_raw_gen_manchester);
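
/*
 * Illustrative sketch: encoding 14 bits as an RC-5-style bi-phase signal.
 * The 889us unit, buffer size and data value are example assumptions, not
 * taken from any particular decoder.
 *
 *	static const struct ir_raw_timings_manchester example_timings = {
 *		.leader_pulse	= 889,
 *		.clock		= 889,		// us per half-bit
 *		.trailer_space	= 889 * 10,
 *	};
 *
 *	struct ir_raw_event buf[64];
 *	struct ir_raw_event *e = buf;
 *	int ret;
 *
 *	ret = ir_raw_gen_manchester(&e, ARRAY_SIZE(buf), &example_timings,
 *				    14, 0x1d45);
 *	// on success, e - buf is the number of events written
 */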

/**
 * ir_raw_gen_pd() - Encode data to raw events with pulse-distance modulation.
 * @ev:		Pointer to pointer to next free event. *@ev is incremented for
 *		each raw event filled.
 * @max:	Maximum number of raw events to fill.
 * @timings:	Pulse distance modulation timings.
 * @n:		Number of bits of data.
 * @data:	Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using pulse-distance
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns:	0 on success.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		full encoded data. In this case all @max events will have been
 *		written.
 */
int ir_raw_gen_pd(struct ir_raw_event **ev, unsigned int max,
		  const struct ir_raw_timings_pd *timings,
		  unsigned int n, u64 data)
{
	int i;
	int ret;
	unsigned int space;

	if (timings->header_pulse) {
		ret = ir_raw_gen_pulse_space(ev, &max, timings->header_pulse,
					     timings->header_space);
		if (ret)
			return ret;
	}

	if (timings->msb_first) {
		for (i = n - 1; i >= 0; --i) {
			space = timings->bit_space[(data >> i) & 1];
			ret = ir_raw_gen_pulse_space(ev, &max,
						     timings->bit_pulse,
						     space);
			if (ret)
				return ret;
		}
	} else {
		for (i = 0; i < n; ++i, data >>= 1) {
			space = timings->bit_space[data & 1];
			ret = ir_raw_gen_pulse_space(ev, &max,
						     timings->bit_pulse,
						     space);
			if (ret)
				return ret;
		}
	}

	ret = ir_raw_gen_pulse_space(ev, &max, timings->trailer_pulse,
				     timings->trailer_space);
	return ret;
}
EXPORT_SYMBOL(ir_raw_gen_pd);
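
/*
 * Illustrative sketch: NEC-style pulse-distance timings (rounded microsecond
 * figures, given purely as an example) encoding a 32-bit word LSB first into
 * a caller-provided buffer.
 *
 *	static const struct ir_raw_timings_pd example_nec_like = {
 *		.header_pulse	= 9000,
 *		.header_space	= 4500,
 *		.bit_pulse	= 560,
 *		.bit_space	= { 560, 1690 },	// 0-bit, 1-bit
 *		.trailer_pulse	= 560,
 *		.trailer_space	= 10000,
 *		.msb_first	= 0,
 *	};
 *
 *	struct ir_raw_event buf[80];
 *	struct ir_raw_event *e = buf;
 *	int ret = ir_raw_gen_pd(&e, ARRAY_SIZE(buf), &example_nec_like,
 *				32, 0x20df10ef);
 */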

/**
 * ir_raw_gen_pl() - Encode data to raw events with pulse-length modulation.
 * @ev:		Pointer to pointer to next free event. *@ev is incremented for
 *		each raw event filled.
 * @max:	Maximum number of raw events to fill.
 * @timings:	Pulse length modulation timings.
 * @n:		Number of bits of data.
 * @data:	Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using pulse-length
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns:	0 on success.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		full encoded data. In this case all @max events will have been
 *		written.
 */
int ir_raw_gen_pl(struct ir_raw_event **ev, unsigned int max,
		  const struct ir_raw_timings_pl *timings,
		  unsigned int n, u64 data)
{
	int i;
	int ret = -ENOBUFS;
	unsigned int pulse;

	if (!max--)
		return ret;

	init_ir_raw_event_duration((*ev)++, 1, timings->header_pulse);

	if (timings->msb_first) {
		for (i = n - 1; i >= 0; --i) {
			if (!max--)
				return ret;
			init_ir_raw_event_duration((*ev)++, 0,
						   timings->bit_space);
			if (!max--)
				return ret;
			pulse = timings->bit_pulse[(data >> i) & 1];
			init_ir_raw_event_duration((*ev)++, 1, pulse);
		}
	} else {
		for (i = 0; i < n; ++i, data >>= 1) {
			if (!max--)
				return ret;
			init_ir_raw_event_duration((*ev)++, 0,
						   timings->bit_space);
			if (!max--)
				return ret;
			pulse = timings->bit_pulse[data & 1];
			init_ir_raw_event_duration((*ev)++, 1, pulse);
		}
	}

	if (!max--)
		return ret;

	init_ir_raw_event_duration((*ev)++, 0, timings->trailer_space);

	return 0;
}
EXPORT_SYMBOL(ir_raw_gen_pl);
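
/*
 * Illustrative sketch: Sony-style pulse-length timings (approximate
 * microsecond values, chosen only for the example) encoding 12 bits
 * LSB first.
 *
 *	static const struct ir_raw_timings_pl example_sirc_like = {
 *		.header_pulse	= 2400,
 *		.bit_space	= 600,
 *		.bit_pulse	= { 600, 1200 },	// 0-bit, 1-bit
 *		.trailer_space	= 10000,
 *		.msb_first	= 0,
 *	};
 *
 *	struct ir_raw_event buf[32];
 *	struct ir_raw_event *e = buf;
 *	int ret = ir_raw_gen_pl(&e, ARRAY_SIZE(buf), &example_sirc_like,
 *				12, 0x010);
 */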

/**
 * ir_raw_encode_scancode() - Encode a scancode as raw events
 *
 * @protocol:	protocol
 * @scancode:	scancode filter describing a single scancode
 * @events:	array of raw events to write into
 * @max:	max number of raw events
 *
 * Attempts to encode the scancode as raw events.
 *
 * Returns:	The number of events written.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		encoding. In this case all @max events will have been written.
 *		-EINVAL if the scancode is ambiguous or invalid, or if no
 *		compatible encoder was found.
 */
int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode,
			   struct ir_raw_event *events, unsigned int max)
{
	struct ir_raw_handler *handler;
	int ret = -EINVAL;
	u64 mask = 1ULL << protocol;

	ir_raw_load_modules(&mask);

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (handler->protocols & mask && handler->encode) {
			ret = handler->encode(protocol, scancode, events, max);
			if (ret >= 0 || ret == -ENOBUFS)
				break;
		}
	}
	mutex_unlock(&ir_raw_handler_lock);

	return ret;
}
EXPORT_SYMBOL(ir_raw_encode_scancode);
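
/*
 * Illustrative sketch: asking rc-core to render a scancode as raw events,
 * e.g. in a driver's transmit path. The scancode value is an arbitrary
 * example; RC_PROTO_NEC is a real protocol identifier.
 *
 *	struct ir_raw_event buf[96];
 *	int count;
 *
 *	count = ir_raw_encode_scancode(RC_PROTO_NEC, 0x0408, buf,
 *				       ARRAY_SIZE(buf));
 *	if (count >= 0) {
 *		// buf[0..count-1] now holds the pulse/space sequence
 *	}
 */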

/**
 * ir_raw_edge_handle() - Handle ir_raw_event_store_edge() processing
 *
 * @t:		timer_list
 *
 * This callback is armed by ir_raw_event_store_edge(). It does two things:
 * first of all, rather than calling ir_raw_event_handle() for each edge and
 * waking up the rc thread, ir_raw_event_handle() is called 15 ms after the
 * first edge. Secondly, it generates a timeout event if no more IR is
 * received after the rc_dev timeout has elapsed.
 */
static void ir_raw_edge_handle(struct timer_list *t)
{
	struct ir_raw_event_ctrl *raw = from_timer(raw, t, edge_handle);
	struct rc_dev *dev = raw->dev;
	unsigned long flags;
	ktime_t interval;

	spin_lock_irqsave(&dev->raw->edge_spinlock, flags);
	interval = ktime_sub(ktime_get(), dev->raw->last_event);
	if (ktime_to_us(interval) >= dev->timeout) {
		struct ir_raw_event ev = {
			.timeout = true,
			.duration = ktime_to_us(interval)
		};

		ir_raw_event_store(dev, &ev);
	} else {
		mod_timer(&dev->raw->edge_handle,
			  jiffies + usecs_to_jiffies(dev->timeout -
						     ktime_to_us(interval)));
	}
	spin_unlock_irqrestore(&dev->raw->edge_spinlock, flags);

	ir_raw_event_handle(dev);
}

/**
 * ir_raw_encode_carrier() - Get carrier used for protocol
 *
 * @protocol:	protocol
 *
 * Attempts to find the carrier for the specified protocol
 *
 * Returns:	The carrier in Hz
 *		-EINVAL if the protocol is invalid, or if no
 *		compatible encoder was found.
 */
int ir_raw_encode_carrier(enum rc_proto protocol)
{
	struct ir_raw_handler *handler;
	int ret = -EINVAL;
	u64 mask = BIT_ULL(protocol);

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (handler->protocols & mask && handler->encode) {
			ret = handler->carrier;
			break;
		}
	}
	mutex_unlock(&ir_raw_handler_lock);

	return ret;
}
EXPORT_SYMBOL(ir_raw_encode_carrier);
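
/*
 * Illustrative sketch: a transmitter that must program its carrier before
 * sending can ask which frequency the chosen protocol uses. RC_PROTO_RC6_MCE
 * is a real protocol identifier; what is done with the result is up to the
 * driver.
 *
 *	int carrier = ir_raw_encode_carrier(RC_PROTO_RC6_MCE);
 *
 *	if (carrier > 0) {
 *		// e.g. hand the value (in Hz) to the hardware's
 *		// carrier setup
 *	}
 */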

/*
 * Used to (un)register raw event clients
 */
int ir_raw_event_prepare(struct rc_dev *dev)
{
	if (!dev)
		return -EINVAL;

	dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
	if (!dev->raw)
		return -ENOMEM;

	dev->raw->dev = dev;
	dev->change_protocol = change_protocol;
	dev->idle = true;
	spin_lock_init(&dev->raw->edge_spinlock);
	timer_setup(&dev->raw->edge_handle, ir_raw_edge_handle, 0);
	INIT_KFIFO(dev->raw->kfifo);

	return 0;
}

int ir_raw_event_register(struct rc_dev *dev)
{
	struct task_struct *thread;

	thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u", dev->minor);
	if (IS_ERR(thread))
		return PTR_ERR(thread);

	dev->raw->thread = thread;

	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&dev->raw->list, &ir_raw_client_list);
	mutex_unlock(&ir_raw_handler_lock);

	return 0;
}

void ir_raw_event_free(struct rc_dev *dev)
{
	if (!dev)
		return;

	kfree(dev->raw);
	dev->raw = NULL;
}

void ir_raw_event_unregister(struct rc_dev *dev)
{
	struct ir_raw_handler *handler;

	if (!dev || !dev->raw)
		return;

	kthread_stop(dev->raw->thread);
	del_timer_sync(&dev->raw->edge_handle);

	mutex_lock(&ir_raw_handler_lock);
	list_del(&dev->raw->list);
	list_for_each_entry(handler, &ir_raw_handler_list, list)
		if (handler->raw_unregister &&
		    (handler->protocols & dev->enabled_protocols))
			handler->raw_unregister(dev);

	lirc_bpf_free(dev);

	ir_raw_event_free(dev);

	/*
	 * A user can be calling bpf(BPF_PROG_{QUERY|ATTACH|DETACH}), so
	 * ensure that the raw member is null on unlock; this is how
	 * "device gone" is checked.
	 */
	mutex_unlock(&ir_raw_handler_lock);
}

/*
 * Extension interface - used to register the IR decoders
 */

int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
{
	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
	atomic64_or(ir_raw_handler->protocols, &available_protocols);
	mutex_unlock(&ir_raw_handler_lock);

	return 0;
}
EXPORT_SYMBOL(ir_raw_handler_register);
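
/*
 * Illustrative sketch: the skeleton of a protocol decoder module. The
 * example_decode() callback and the RC_PROTO_BIT_OTHER mask stand in for a
 * real decoder's state machine and protocol bit.
 *
 *	static int example_decode(struct rc_dev *dev, struct ir_raw_event ev)
 *	{
 *		// run the protocol state machine on one pulse/space event
 *		return 0;
 *	}
 *
 *	static struct ir_raw_handler example_handler = {
 *		.protocols	= RC_PROTO_BIT_OTHER,
 *		.decode		= example_decode,
 *	};
 *
 *	static int __init example_decode_init(void)
 *	{
 *		return ir_raw_handler_register(&example_handler);
 *	}
 *
 *	static void __exit example_decode_exit(void)
 *	{
 *		ir_raw_handler_unregister(&example_handler);
 *	}
 */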

void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
{
	struct ir_raw_event_ctrl *raw;
	u64 protocols = ir_raw_handler->protocols;

	mutex_lock(&ir_raw_handler_lock);
	list_del(&ir_raw_handler->list);
	list_for_each_entry(raw, &ir_raw_client_list, list) {
		if (ir_raw_handler->raw_unregister &&
		    (raw->dev->enabled_protocols & protocols))
			ir_raw_handler->raw_unregister(raw->dev);
		ir_raw_disable_protocols(raw->dev, protocols);
	}
	atomic64_andnot(protocols, &available_protocols);
	mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);