blob: b9e4645c731c087f3dea99fa652fe09ef19dc8de [file] [log] [blame]
/* rc-ir-raw.c - handle IR pulse/space events
 *
 * Copyright (C) 2010 by Mauro Carvalho Chehab
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
14
Paul Gortmaker35a24632011-08-01 15:26:38 -040015#include <linux/export.h>
Maxim Levitsky0d2cb1d2010-07-31 11:59:17 -030016#include <linux/kthread.h>
Maxim Levitsky45a568f2010-07-31 11:59:16 -030017#include <linux/mutex.h>
Stephen Rothwelldff65de2011-07-29 15:34:32 +100018#include <linux/kmod.h>
David Härdeman724e2492010-04-08 13:10:00 -030019#include <linux/sched.h>
Maxim Levitsky0d2cb1d2010-07-31 11:59:17 -030020#include <linux/freezer.h>
Mauro Carvalho Chehabf62de672010-11-09 23:09:57 -030021#include "rc-core-priv.h"
Mauro Carvalho Chehaba3572c32010-03-20 20:59:44 -030022
/* Define the max number of pulse/space transitions to buffer */
#define MAX_IR_EVENT_SIZE	512

/* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
static LIST_HEAD(ir_raw_client_list);

/* Used to handle IR raw handler extensions */
static DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
/* Bitmask of protocols supported by the registered handlers (decoders);
 * both masks are protected by ir_raw_handler_lock. */
static u64 available_protocols;
/* Subset of available_protocols whose handlers also implement ->encode */
static u64 encode_protocols;
Mauro Carvalho Chehab93c312f2010-03-25 21:13:43 -030034
/*
 * ir_raw_event_thread() - per-device kthread that drains the sample kfifo
 * and runs every stored pulse/space event through all registered raw IR
 * decoders.
 * @data: pointer to the device's struct ir_raw_event_ctrl
 *
 * Sleeps when less than one complete event is buffered; it is woken by
 * ir_raw_event_handle(). Returns 0 when stopped via kthread_stop().
 */
static int ir_raw_event_thread(void *data)
{
	struct ir_raw_event ev;
	struct ir_raw_handler *handler;
	struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;
	int retval;

	while (!kthread_should_stop()) {

		spin_lock_irq(&raw->lock);
		retval = kfifo_len(&raw->kfifo);

		if (retval < sizeof(ev)) {
			/* Not enough data for one full event: mark ourselves
			 * interruptible *before* re-checking
			 * kthread_should_stop(), so a concurrent
			 * kthread_stop() wakeup cannot be lost, then sleep.
			 */
			set_current_state(TASK_INTERRUPTIBLE);

			if (kthread_should_stop())
				set_current_state(TASK_RUNNING);

			spin_unlock_irq(&raw->lock);
			schedule();
			continue;
		}

		retval = kfifo_out(&raw->kfifo, &ev, sizeof(ev));
		spin_unlock_irq(&raw->lock);

		/* Feed the sample to every registered protocol decoder;
		 * the handler list is protected by ir_raw_handler_lock. */
		mutex_lock(&ir_raw_handler_lock);
		list_for_each_entry(handler, &ir_raw_handler_list, list)
			handler->decode(raw->dev, ev);
		raw->prev_ev = ev;
		mutex_unlock(&ir_raw_handler_lock);
	}

	return 0;
}
70
/**
 * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines. Pulses are
 * signalled as positive values and spaces as negative values. A zero value
 * will reset the decoding state machines.
 *
 * Returns 0 on success, -EINVAL if the device has no raw context, or
 * -ENOMEM if the event did not fit into the kfifo.
 */
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	IR_dprintk(2, "sample: (%05dus %s)\n",
		   TO_US(ev->duration), TO_STR(ev->pulse));

	/* kfifo_in() returns the number of bytes copied; a short copy
	 * means the fifo is full and the sample is dropped. */
	if (kfifo_in(&dev->raw->kfifo, ev, sizeof(*ev)) != sizeof(*ev))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store);
95
/**
 * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
 * @dev:	the struct rc_dev device descriptor
 * @type:	the type of the event that has occurred
 *
 * This routine (which may be called from an interrupt context) is used to
 * store the beginning of an ir pulse or space (or the start/end of ir
 * reception) for the raw ir decoding state machines. This is used by
 * hardware which does not provide durations directly but only interrupts
 * (or similar events) on state change.
 */
int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
{
	ktime_t			now;
	s64			delta; /* ns */
	DEFINE_IR_RAW_EVENT(ev);
	int			rc = 0;
	/* NOTE(review): delay holds the repeat delay converted to ns in an
	 * int; assumes rep[REP_DELAY] stays small enough (< ~2.1 s) not to
	 * overflow — confirm against input-core defaults. */
	int			delay;

	if (!dev->raw)
		return -EINVAL;

	now = ktime_get();
	delta = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
	delay = MS_TO_NS(dev->input_dev->rep[REP_DELAY]);

	/* Check for a long duration since last event or if we're
	 * being called for the first time, note that delta can't
	 * possibly be negative.
	 */
	if (delta > delay || !dev->raw->last_type)
		type |= IR_START_EVENT;
	else
		ev.duration = delta;

	if (type & IR_START_EVENT)
		ir_raw_event_reset(dev);
	else if (dev->raw->last_type & IR_SPACE) {
		/* the edge terminates the previous space */
		ev.pulse = false;
		rc = ir_raw_event_store(dev, &ev);
	} else if (dev->raw->last_type & IR_PULSE) {
		/* the edge terminates the previous pulse */
		ev.pulse = true;
		rc = ir_raw_event_store(dev, &ev);
	} else
		return 0;

	dev->raw->last_event = now;
	dev->raw->last_type = type;
	return rc;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
147
/**
 * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) works
 * in similar manner to ir_raw_event_store_edge.
 * This routine is intended for devices with limited internal buffer
 * It automerges samples of same type, and handles timeouts. Returns non-zero
 * if the event was added, and zero if the event was ignored due to idle
 * processing.
 */
int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	/* Ignore spaces in idle mode */
	if (dev->idle && !ev->pulse)
		return 0;
	else if (dev->idle)
		/* a pulse while idle ends the idle period */
		ir_raw_event_set_idle(dev, false);

	if (!dev->raw->this_ev.duration)
		/* no sample accumulated yet: start a new one */
		dev->raw->this_ev = *ev;
	else if (ev->pulse == dev->raw->this_ev.pulse)
		/* same level as the accumulated sample: merge durations */
		dev->raw->this_ev.duration += ev->duration;
	else {
		/* level changed: flush the accumulated sample and restart */
		ir_raw_event_store(dev, &dev->raw->this_ev);
		dev->raw->this_ev = *ev;
	}

	/* Enter idle mode if necessary */
	if (!ev->pulse && dev->timeout &&
	    dev->raw->this_ev.duration >= dev->timeout)
		ir_raw_event_set_idle(dev, true);

	return 1;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
188
/**
 * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not
 * @dev:	the struct rc_dev device descriptor
 * @idle:	whether the device is idle or not
 *
 * On entering idle the currently accumulated sample is marked as a timeout,
 * flushed to the decoders and reset; the driver's s_idle() callback, if
 * provided, is informed either way.
 */
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
{
	if (!dev->raw)
		return;

	IR_dprintk(2, "%s idle mode\n", idle ? "enter" : "leave");

	if (idle) {
		/* flush the pending sample as a timeout event */
		dev->raw->this_ev.timeout = true;
		ir_raw_event_store(dev, &dev->raw->this_ev);
		init_ir_raw_event(&dev->raw->this_ev);
	}

	if (dev->s_idle)
		dev->s_idle(dev, idle);

	dev->idle = idle;
}
EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
213
/**
 * ir_raw_event_handle() - schedules the decoding of stored ir data
 * @dev:	the struct rc_dev device descriptor
 *
 * This routine will tell rc-core to start decoding stored ir data, by
 * waking the device's ir_raw_event_thread. Safe to call from interrupt
 * context (uses spin_lock_irqsave).
 */
void ir_raw_event_handle(struct rc_dev *dev)
{
	unsigned long flags;

	if (!dev->raw)
		return;

	/* take raw->lock so the wakeup is serialized against the
	 * thread's sleep/check sequence */
	spin_lock_irqsave(&dev->raw->lock, flags);
	wake_up_process(dev->raw->thread);
	spin_unlock_irqrestore(&dev->raw->lock, flags);
}
EXPORT_SYMBOL_GPL(ir_raw_event_handle);
Mauro Carvalho Chehab995187b2010-03-24 20:47:53 -0300232
David Härdeman667c9eb2010-06-13 17:29:31 -0300233/* used internally by the sysfs interface */
234u64
Randy Dunlap2dbd61b2011-01-09 00:53:53 -0300235ir_raw_get_allowed_protocols(void)
David Härdeman667c9eb2010-06-13 17:29:31 -0300236{
237 u64 protocols;
Maxim Levitsky45a568f2010-07-31 11:59:16 -0300238 mutex_lock(&ir_raw_handler_lock);
David Härdeman667c9eb2010-06-13 17:29:31 -0300239 protocols = available_protocols;
Maxim Levitsky45a568f2010-07-31 11:59:16 -0300240 mutex_unlock(&ir_raw_handler_lock);
David Härdeman667c9eb2010-06-13 17:29:31 -0300241 return protocols;
242}
243
James Hogan0d830b22015-03-31 14:48:10 -0300244/* used internally by the sysfs interface */
245u64
246ir_raw_get_encode_protocols(void)
247{
248 u64 protocols;
249
250 mutex_lock(&ir_raw_handler_lock);
251 protocols = encode_protocols;
252 mutex_unlock(&ir_raw_handler_lock);
253 return protocols;
254}
255
/* Default change_protocol callback installed for raw devices by
 * ir_raw_event_register(): nothing to program, always succeeds. */
static int change_protocol(struct rc_dev *dev, u64 *rc_type)
{
	/* the caller will update dev->enabled_protocols */
	return 0;
}
261
/**
 * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
 * @ev:		Pointer to pointer to next free event. *@ev is incremented for
 *		each raw event filled.
 * @max:	Maximum number of raw events to fill.
 * @timings:	Manchester modulation timings.
 * @n:		Number of bits of data.
 * @data:	Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using Manchester (bi-phase)
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * NOTE(review): assumes @n >= 1 — for n == 0 the initial "1 << (n - 1)"
 * shift would be undefined behaviour; confirm all callers pass n >= 1.
 *
 * Returns:	0 on success.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		full encoded data. In this case all @max events will have been
 *		written.
 */
int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
			  const struct ir_raw_timings_manchester *timings,
			  unsigned int n, unsigned int data)
{
	bool need_pulse;
	unsigned int i;
	int ret = -ENOBUFS;

	/* i is a single-bit mask walking from the MSB of the n-bit value */
	i = 1 << (n - 1);

	if (timings->leader) {
		if (!max--)
			return ret;
		if (timings->pulse_space_start) {
			/* leader is a pulse followed by a space */
			init_ir_raw_event_duration((*ev)++, 1, timings->leader);

			if (!max--)
				return ret;
			init_ir_raw_event_duration((*ev), 0, timings->leader);
		} else {
			/* leader is a single pulse */
			init_ir_raw_event_duration((*ev), 1, timings->leader);
		}
		/* the leader consumes the first data bit */
		i >>= 1;
	} else {
		/* continue existing signal */
		--(*ev);
	}
	/* from here on *ev will point to the last event rather than the next */

	while (n && i > 0) {
		need_pulse = !(data & i);
		if (timings->invert)
			need_pulse = !need_pulse;
		/* first half-clock: merge into the previous event when the
		 * level does not change, otherwise emit a new event */
		if (need_pulse == !!(*ev)->pulse) {
			(*ev)->duration += timings->clock;
		} else {
			if (!max--)
				goto nobufs;
			init_ir_raw_event_duration(++(*ev), need_pulse,
						   timings->clock);
		}

		/* second half-clock: always the opposite level */
		if (!max--)
			goto nobufs;
		init_ir_raw_event_duration(++(*ev), !need_pulse,
					   timings->clock);
		i >>= 1;
	}

	if (timings->trailer_space) {
		/* extend a trailing space, or append one after a pulse */
		if (!(*ev)->pulse)
			(*ev)->duration += timings->trailer_space;
		else if (!max--)
			goto nobufs;
		else
			init_ir_raw_event_duration(++(*ev), 0,
						   timings->trailer_space);
	}

	ret = 0;
nobufs:
	/* point to the next event rather than last event before returning */
	++(*ev);
	return ret;
}
EXPORT_SYMBOL(ir_raw_gen_manchester);
346
/**
 * ir_raw_encode_scancode() - Encode a scancode as raw events
 *
 * @protocols:	permitted protocols
 * @scancode:	scancode filter describing a single scancode
 * @events:	array of raw events to write into
 * @max:	max number of raw events
 *
 * Attempts to encode the scancode as raw events.
 *
 * Returns:	The number of events written.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		encoding. In this case all @max events will have been written.
 *		-EINVAL if the scancode is ambiguous or invalid, or if no
 *		compatible encoder was found.
 */
int ir_raw_encode_scancode(u64 protocols,
			   const struct rc_scancode_filter *scancode,
			   struct ir_raw_event *events, unsigned int max)
{
	struct ir_raw_handler *handler;
	int ret = -EINVAL;

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (handler->protocols & protocols && handler->encode) {
			/* first handler that succeeds (or definitively runs
			 * out of buffer space) wins; other errors let the
			 * next handler try */
			ret = handler->encode(protocols, scancode, events, max);
			if (ret >= 0 || ret == -ENOBUFS)
				break;
		}
	}
	mutex_unlock(&ir_raw_handler_lock);

	return ret;
}
EXPORT_SYMBOL(ir_raw_encode_scancode);
383
David Härdeman667c9eb2010-06-13 17:29:31 -0300384/*
385 * Used to (un)register raw event clients
386 */
David Härdemand8b4b582010-10-29 16:08:23 -0300387int ir_raw_event_register(struct rc_dev *dev)
David Härdeman667c9eb2010-06-13 17:29:31 -0300388{
David Härdeman667c9eb2010-06-13 17:29:31 -0300389 int rc;
David Härdemanc2163692010-06-13 17:29:36 -0300390 struct ir_raw_handler *handler;
David Härdeman667c9eb2010-06-13 17:29:31 -0300391
David Härdemand8b4b582010-10-29 16:08:23 -0300392 if (!dev)
393 return -EINVAL;
394
395 dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
396 if (!dev->raw)
David Härdeman667c9eb2010-06-13 17:29:31 -0300397 return -ENOMEM;
398
David Härdemand8b4b582010-10-29 16:08:23 -0300399 dev->raw->dev = dev;
David Härdemanda6e1622014-04-03 20:32:16 -0300400 dev->change_protocol = change_protocol;
David Härdemand8b4b582010-10-29 16:08:23 -0300401 rc = kfifo_alloc(&dev->raw->kfifo,
402 sizeof(struct ir_raw_event) * MAX_IR_EVENT_SIZE,
David Härdeman667c9eb2010-06-13 17:29:31 -0300403 GFP_KERNEL);
David Härdemand8b4b582010-10-29 16:08:23 -0300404 if (rc < 0)
405 goto out;
David Härdeman667c9eb2010-06-13 17:29:31 -0300406
David Härdemand8b4b582010-10-29 16:08:23 -0300407 spin_lock_init(&dev->raw->lock);
408 dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
409 "rc%ld", dev->devno);
Maxim Levitsky0d2cb1d2010-07-31 11:59:17 -0300410
David Härdemand8b4b582010-10-29 16:08:23 -0300411 if (IS_ERR(dev->raw->thread)) {
412 rc = PTR_ERR(dev->raw->thread);
413 goto out;
Maxim Levitsky0d2cb1d2010-07-31 11:59:17 -0300414 }
415
Maxim Levitsky45a568f2010-07-31 11:59:16 -0300416 mutex_lock(&ir_raw_handler_lock);
David Härdemand8b4b582010-10-29 16:08:23 -0300417 list_add_tail(&dev->raw->list, &ir_raw_client_list);
David Härdemanc2163692010-06-13 17:29:36 -0300418 list_for_each_entry(handler, &ir_raw_handler_list, list)
419 if (handler->raw_register)
David Härdemand8b4b582010-10-29 16:08:23 -0300420 handler->raw_register(dev);
Maxim Levitsky45a568f2010-07-31 11:59:16 -0300421 mutex_unlock(&ir_raw_handler_lock);
David Härdeman667c9eb2010-06-13 17:29:31 -0300422
David Härdemanc2163692010-06-13 17:29:36 -0300423 return 0;
David Härdemand8b4b582010-10-29 16:08:23 -0300424
425out:
426 kfree(dev->raw);
427 dev->raw = NULL;
428 return rc;
David Härdeman667c9eb2010-06-13 17:29:31 -0300429}
430
/*
 * ir_raw_event_unregister() - tear down the raw IR context created by
 * ir_raw_event_register(): stop the decoder kthread, drop the device from
 * the client list, notify handlers, then free the kfifo and the control
 * structure. Safe to call with a NULL or never-registered device.
 */
void ir_raw_event_unregister(struct rc_dev *dev)
{
	struct ir_raw_handler *handler;

	if (!dev || !dev->raw)
		return;

	/* stop the thread before tearing anything down */
	kthread_stop(dev->raw->thread);

	mutex_lock(&ir_raw_handler_lock);
	list_del(&dev->raw->list);
	list_for_each_entry(handler, &ir_raw_handler_list, list)
		if (handler->raw_unregister)
			handler->raw_unregister(dev);
	mutex_unlock(&ir_raw_handler_lock);

	kfifo_free(&dev->raw->kfifo);
	kfree(dev->raw);
	dev->raw = NULL;
}
451
/*
 * Extension interface - used to register the IR decoders
 */

/*
 * ir_raw_handler_register() - add a protocol handler (decoder/encoder) to
 * the global handler list, invoke its raw_register() callback for every
 * already-registered raw client, and publish its protocols in the
 * available (and, when it can encode, the encode) protocol masks.
 * Always returns 0.
 */
int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
{
	struct ir_raw_event_ctrl *raw;

	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
	if (ir_raw_handler->raw_register)
		list_for_each_entry(raw, &ir_raw_client_list, list)
			ir_raw_handler->raw_register(raw->dev);
	available_protocols |= ir_raw_handler->protocols;
	if (ir_raw_handler->encode)
		encode_protocols |= ir_raw_handler->protocols;
	mutex_unlock(&ir_raw_handler_lock);

	return 0;
}
EXPORT_SYMBOL(ir_raw_handler_register);
473
/*
 * ir_raw_handler_unregister() - mirror of ir_raw_handler_register():
 * remove the handler from the global list, invoke its raw_unregister()
 * callback for every raw client, and clear its protocols from the
 * available/encode protocol masks.
 */
void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
{
	struct ir_raw_event_ctrl *raw;

	mutex_lock(&ir_raw_handler_lock);
	list_del(&ir_raw_handler->list);
	if (ir_raw_handler->raw_unregister)
		list_for_each_entry(raw, &ir_raw_client_list, list)
			ir_raw_handler->raw_unregister(raw->dev);
	available_protocols &= ~ir_raw_handler->protocols;
	if (ir_raw_handler->encode)
		encode_protocols &= ~ir_raw_handler->protocols;
	mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);
489
/*
 * ir_raw_init() - request loading of all protocol decoder modules at
 * rc-core initialization time.
 */
void ir_raw_init(void)
{
	/* Load the decoder modules */

	load_nec_decode();
	load_rc5_decode();
	load_rc6_decode();
	load_jvc_decode();
	load_sony_decode();
	load_sanyo_decode();
	load_sharp_decode();
	load_mce_kbd_decode();
	load_lirc_codec();
	load_xmp_decode();

	/* If needed, we may later add some init code. In this case,
	   it is needed to change the CONFIG_MODULE test at rc-core.h
	 */
}