/* rc-ir-raw.c - handle IR pulse/space events
 *
 * Copyright (C) 2010 by Mauro Carvalho Chehab
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include <linux/freezer.h>
#include "rc-core-priv.h"

/* Define the max number of pulse/space transitions to buffer */
#define MAX_IR_EVENT_SIZE	512

/* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
static LIST_HEAD(ir_raw_client_list);

/* Used to handle IR raw handler extensions */
static DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
static u64 available_protocols;
static u64 encode_protocols;

static int ir_raw_event_thread(void *data)
{
	struct ir_raw_event ev;
	struct ir_raw_handler *handler;
	struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;
	int retval;

	while (!kthread_should_stop()) {

		spin_lock_irq(&raw->lock);
		retval = kfifo_len(&raw->kfifo);

		if (retval < sizeof(ev)) {
			set_current_state(TASK_INTERRUPTIBLE);

			if (kthread_should_stop())
				set_current_state(TASK_RUNNING);

			spin_unlock_irq(&raw->lock);
			schedule();
			continue;
		}

		retval = kfifo_out(&raw->kfifo, &ev, sizeof(ev));
		spin_unlock_irq(&raw->lock);

		mutex_lock(&ir_raw_handler_lock);
		list_for_each_entry(handler, &ir_raw_handler_list, list)
			handler->decode(raw->dev, ev);
		raw->prev_ev = ev;
		mutex_unlock(&ir_raw_handler_lock);
	}

	return 0;
}

/**
 * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines. Pulses are
 * signalled as positive values and spaces as negative values. A zero value
 * will reset the decoding state machines.
 */
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	IR_dprintk(2, "sample: (%05dus %s)\n",
		   TO_US(ev->duration), TO_STR(ev->pulse));

	if (kfifo_in(&dev->raw->kfifo, ev, sizeof(*ev)) != sizeof(*ev))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store);

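/*
 * Usage sketch (illustrative only, not compiled): a driver whose hardware
 * reports measured pulse/space durations could feed them to the raw
 * decoders roughly like this. The rc_dev pointer "dev" and the sample
 * values are hypothetical; durations are expressed in nanoseconds.
 *
 *	DEFINE_IR_RAW_EVENT(ev);
 *
 *	ev.pulse = true;
 *	ev.duration = 900 * 1000;		// 900us mark
 *	ir_raw_event_store(dev, &ev);
 *
 *	ev.pulse = false;
 *	ev.duration = 450 * 1000;		// 450us space
 *	ir_raw_event_store(dev, &ev);
 *
 *	ir_raw_event_handle(dev);		// wake the decoding thread
 */
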
/**
 * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
 * @dev:	the struct rc_dev device descriptor
 * @type:	the type of the event that has occurred
 *
 * This routine (which may be called from an interrupt context) is used to
 * store the beginning of an ir pulse or space (or the start/end of ir
 * reception) for the raw ir decoding state machines. This is used by
 * hardware which does not provide durations directly but only interrupts
 * (or similar events) on state change.
 */
int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
{
	ktime_t			now;
	s64			delta; /* ns */
	DEFINE_IR_RAW_EVENT(ev);
	int			rc = 0;
	int			delay;

	if (!dev->raw)
		return -EINVAL;

	now = ktime_get();
	delta = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
	delay = MS_TO_NS(dev->input_dev->rep[REP_DELAY]);

	/* Check for a long duration since the last event or if we're
	 * being called for the first time; note that delta can't
	 * possibly be negative.
	 */
	if (delta > delay || !dev->raw->last_type)
		type |= IR_START_EVENT;
	else
		ev.duration = delta;

	if (type & IR_START_EVENT)
		ir_raw_event_reset(dev);
	else if (dev->raw->last_type & IR_SPACE) {
		ev.pulse = false;
		rc = ir_raw_event_store(dev, &ev);
	} else if (dev->raw->last_type & IR_PULSE) {
		ev.pulse = true;
		rc = ir_raw_event_store(dev, &ev);
	} else
		return 0;

	dev->raw->last_event = now;
	dev->raw->last_type = type;
	return rc;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);

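/*
 * Usage sketch (illustrative only, not compiled): hardware that only
 * interrupts on level changes could report each edge from its interrupt
 * handler along these lines. "dev" and the "level" sample taken after the
 * edge are hypothetical, driver-specific details.
 *
 *	if (level)
 *		ir_raw_event_store_edge(dev, IR_PULSE);	// a pulse starts
 *	else
 *		ir_raw_event_store_edge(dev, IR_SPACE);	// a space starts
 *	ir_raw_event_handle(dev);
 */
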
/**
 * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) works
 * in a similar manner to ir_raw_event_store_edge().
 * This routine is intended for devices with limited internal buffers.
 * It automerges samples of the same type and handles timeouts. Returns non-zero
 * if the event was added, and zero if the event was ignored due to idle
 * processing.
 */
int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	/* Ignore spaces in idle mode */
	if (dev->idle && !ev->pulse)
		return 0;
	else if (dev->idle)
		ir_raw_event_set_idle(dev, false);

	if (!dev->raw->this_ev.duration)
		dev->raw->this_ev = *ev;
	else if (ev->pulse == dev->raw->this_ev.pulse)
		dev->raw->this_ev.duration += ev->duration;
	else {
		ir_raw_event_store(dev, &dev->raw->this_ev);
		dev->raw->this_ev = *ev;
	}

	/* Enter idle mode if necessary */
	if (!ev->pulse && dev->timeout &&
	    dev->raw->this_ev.duration >= dev->timeout)
		ir_raw_event_set_idle(dev, true);

	return 1;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);

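/*
 * Usage sketch (illustrative only, not compiled): a driver draining a small
 * hardware sample FIFO can let rc-core merge consecutive samples of the
 * same polarity and detect the idle timeout. "dev", "sample_is_mark" and
 * "sample_ns" are hypothetical; dev->timeout is assumed to be set.
 *
 *	DEFINE_IR_RAW_EVENT(ev);
 *
 *	ev.pulse = sample_is_mark;
 *	ev.duration = sample_ns;
 *	ir_raw_event_store_with_filter(dev, &ev);
 *	ir_raw_event_handle(dev);
 */
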
/**
 * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not
 * @dev:	the struct rc_dev device descriptor
 * @idle:	whether the device is idle or not
 */
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
{
	if (!dev->raw)
		return;

	IR_dprintk(2, "%s idle mode\n", idle ? "enter" : "leave");

	if (idle) {
		dev->raw->this_ev.timeout = true;
		ir_raw_event_store(dev, &dev->raw->this_ev);
		init_ir_raw_event(&dev->raw->this_ev);
	}

	if (dev->s_idle)
		dev->s_idle(dev, idle);

	dev->idle = idle;
}
EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);

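/*
 * Usage sketch (illustrative only, not compiled): a driver whose hardware
 * raises a dedicated "reception finished" interrupt might hint the end of a
 * signal like this, flushing the merged sample with the timeout flag set.
 * "dev" is a hypothetical rc_dev pointer.
 *
 *	ir_raw_event_set_idle(dev, true);
 *	ir_raw_event_handle(dev);
 */
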
/**
 * ir_raw_event_handle() - schedules the decoding of stored ir data
 * @dev:	the struct rc_dev device descriptor
 *
 * This routine will tell rc-core to start decoding stored ir data.
 */
void ir_raw_event_handle(struct rc_dev *dev)
{
	unsigned long flags;

	if (!dev->raw)
		return;

	spin_lock_irqsave(&dev->raw->lock, flags);
	wake_up_process(dev->raw->thread);
	spin_unlock_irqrestore(&dev->raw->lock, flags);
}
EXPORT_SYMBOL_GPL(ir_raw_event_handle);

/* used internally by the sysfs interface */
u64
ir_raw_get_allowed_protocols(void)
{
	u64 protocols;
	mutex_lock(&ir_raw_handler_lock);
	protocols = available_protocols;
	mutex_unlock(&ir_raw_handler_lock);
	return protocols;
}

/* used internally by the sysfs interface */
u64
ir_raw_get_encode_protocols(void)
{
	u64 protocols;

	mutex_lock(&ir_raw_handler_lock);
	protocols = encode_protocols;
	mutex_unlock(&ir_raw_handler_lock);
	return protocols;
}

static int change_protocol(struct rc_dev *dev, u64 *rc_type)
{
	/* the caller will update dev->enabled_protocols */
	return 0;
}

/**
 * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
 * @ev:		Pointer to pointer to next free event. *@ev is incremented for
 *		each raw event filled.
 * @max:	Maximum number of raw events to fill.
 * @timings:	Manchester modulation timings.
 * @n:		Number of bits of data.
 * @data:	Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using Manchester (bi-phase)
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns:	0 on success.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		full encoded data. In this case all @max events will have been
 *		written.
 */
int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
			  const struct ir_raw_timings_manchester *timings,
			  unsigned int n, unsigned int data)
{
	bool need_pulse;
	unsigned int i;
	int ret = -ENOBUFS;

	i = 1 << (n - 1);

	if (timings->leader) {
		if (!max--)
			return ret;
		if (timings->pulse_space_start) {
			init_ir_raw_event_duration((*ev)++, 1, timings->leader);

			if (!max--)
				return ret;
			init_ir_raw_event_duration((*ev), 0, timings->leader);
		} else {
			init_ir_raw_event_duration((*ev), 1, timings->leader);
		}
		i >>= 1;
	} else {
		/* continue existing signal */
		--(*ev);
	}
	/* from here on *ev will point to the last event rather than the next */

	while (n && i > 0) {
		need_pulse = !(data & i);
		if (timings->invert)
			need_pulse = !need_pulse;
		if (need_pulse == !!(*ev)->pulse) {
			(*ev)->duration += timings->clock;
		} else {
			if (!max--)
				goto nobufs;
			init_ir_raw_event_duration(++(*ev), need_pulse,
						   timings->clock);
		}

		if (!max--)
			goto nobufs;
		init_ir_raw_event_duration(++(*ev), !need_pulse,
					   timings->clock);
		i >>= 1;
	}

	if (timings->trailer_space) {
		if (!(*ev)->pulse)
			(*ev)->duration += timings->trailer_space;
		else if (!max--)
			goto nobufs;
		else
			init_ir_raw_event_duration(++(*ev), 0,
						   timings->trailer_space);
	}

	ret = 0;
nobufs:
	/* point to the next event rather than last event before returning */
	++(*ev);
	return ret;
}
EXPORT_SYMBOL(ir_raw_gen_manchester);

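/*
 * Usage sketch (illustrative only, not compiled): a protocol encoder could
 * use this helper to emit a Manchester-coded frame. The RC-5-like timings,
 * the "events"/"max" buffer and the 14-bit "bits" value are hypothetical;
 * durations are in nanoseconds, and the leader stands in for the most
 * significant bit of the frame.
 *
 *	static const struct ir_raw_timings_manchester timings = {
 *		.leader		= 888888,	// first half-bit, sent as a pulse
 *		.clock		= 888888,	// one half-bit period
 *		.trailer_space	= 888888 * 4,	// gap after the frame
 *	};
 *	struct ir_raw_event *e = events;
 *	int ret;
 *
 *	ret = ir_raw_gen_manchester(&e, max, &timings, 14, bits);
 *	if (ret)
 *		return ret;
 *	return e - events;			// number of events produced
 */
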
/**
 * ir_raw_encode_scancode() - Encode a scancode as raw events
 *
 * @protocols:	permitted protocols
 * @scancode:	scancode filter describing a single scancode
 * @events:	array of raw events to write into
 * @max:	max number of raw events
 *
 * Attempts to encode the scancode as raw events.
 *
 * Returns:	The number of events written.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		encoding. In this case all @max events will have been written.
 *		-EINVAL if the scancode is ambiguous or invalid, or if no
 *		compatible encoder was found.
 */
int ir_raw_encode_scancode(u64 protocols,
			   const struct rc_scancode_filter *scancode,
			   struct ir_raw_event *events, unsigned int max)
{
	struct ir_raw_handler *handler;
	int ret = -EINVAL;

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (handler->protocols & protocols && handler->encode) {
			ret = handler->encode(protocols, scancode, events, max);
			if (ret >= 0 || ret == -ENOBUFS)
				break;
		}
	}
	mutex_unlock(&ir_raw_handler_lock);

	return ret;
}
EXPORT_SYMBOL(ir_raw_encode_scancode);

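/*
 * Usage sketch (illustrative only, not compiled): a transmitter driver could
 * turn a scancode into raw events before handing them to its TX path. The
 * RC_BIT_RC5 protocol mask, the buffer size and the scancode value are
 * hypothetical.
 *
 *	struct ir_raw_event events[64];
 *	struct rc_scancode_filter scancode = { .data = 0x1e01, .mask = 0xffff };
 *	int count;
 *
 *	count = ir_raw_encode_scancode(RC_BIT_RC5, &scancode, events,
 *				       ARRAY_SIZE(events));
 *	if (count < 0)
 *		return count;
 *	// transmit "count" events with the driver's tx routine
 */
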
/*
 * Used to (un)register raw event clients
 */
int ir_raw_event_register(struct rc_dev *dev)
{
	int rc;
	struct ir_raw_handler *handler;

	if (!dev)
		return -EINVAL;

	dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
	if (!dev->raw)
		return -ENOMEM;

	dev->raw->dev = dev;
	dev->change_protocol = change_protocol;
	rc = kfifo_alloc(&dev->raw->kfifo,
			 sizeof(struct ir_raw_event) * MAX_IR_EVENT_SIZE,
			 GFP_KERNEL);
	if (rc < 0)
		goto out;

	spin_lock_init(&dev->raw->lock);
	dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
				       "rc%ld", dev->devno);

	if (IS_ERR(dev->raw->thread)) {
		rc = PTR_ERR(dev->raw->thread);
		goto out;
	}

	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&dev->raw->list, &ir_raw_client_list);
	list_for_each_entry(handler, &ir_raw_handler_list, list)
		if (handler->raw_register)
			handler->raw_register(dev);
	mutex_unlock(&ir_raw_handler_lock);

	return 0;

out:
	kfree(dev->raw);
	dev->raw = NULL;
	return rc;
}

void ir_raw_event_unregister(struct rc_dev *dev)
{
	struct ir_raw_handler *handler;

	if (!dev || !dev->raw)
		return;

	kthread_stop(dev->raw->thread);

	mutex_lock(&ir_raw_handler_lock);
	list_del(&dev->raw->list);
	list_for_each_entry(handler, &ir_raw_handler_list, list)
		if (handler->raw_unregister)
			handler->raw_unregister(dev);
	mutex_unlock(&ir_raw_handler_lock);

	kfifo_free(&dev->raw->kfifo);
	kfree(dev->raw);
	dev->raw = NULL;
}

/*
 * Extension interface - used to register the IR decoders
 */

int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
{
	struct ir_raw_event_ctrl *raw;

	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
	if (ir_raw_handler->raw_register)
		list_for_each_entry(raw, &ir_raw_client_list, list)
			ir_raw_handler->raw_register(raw->dev);
	available_protocols |= ir_raw_handler->protocols;
	if (ir_raw_handler->encode)
		encode_protocols |= ir_raw_handler->protocols;
	mutex_unlock(&ir_raw_handler_lock);

	return 0;
}
EXPORT_SYMBOL(ir_raw_handler_register);

void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
{
	struct ir_raw_event_ctrl *raw;

	mutex_lock(&ir_raw_handler_lock);
	list_del(&ir_raw_handler->list);
	if (ir_raw_handler->raw_unregister)
		list_for_each_entry(raw, &ir_raw_client_list, list)
			ir_raw_handler->raw_unregister(raw->dev);
	available_protocols &= ~ir_raw_handler->protocols;
	if (ir_raw_handler->encode)
		encode_protocols &= ~ir_raw_handler->protocols;
	mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);

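/*
 * Usage sketch (illustrative only, not compiled): a decoder module registers
 * itself with a handler structure along these lines. The "foo" protocol bit
 * and decode callback are hypothetical.
 *
 *	static struct ir_raw_handler foo_handler = {
 *		.protocols	= RC_BIT_OTHER,
 *		.decode		= ir_foo_decode,
 *	};
 *
 *	// in the module init/exit paths:
 *	ir_raw_handler_register(&foo_handler);
 *	ir_raw_handler_unregister(&foo_handler);
 */
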
void ir_raw_init(void)
{
	/* Load the decoder modules */

	load_nec_decode();
	load_rc5_decode();
	load_rc6_decode();
	load_jvc_decode();
	load_sony_decode();
	load_sanyo_decode();
	load_sharp_decode();
	load_mce_kbd_decode();
	load_lirc_codec();
	load_xmp_decode();

	/* If needed, we may later add some init code. In that case,
	 * the CONFIG_MODULE test in rc-core.h needs to be changed as well.
	 */
}