1 /* rc-ir-raw.c - handle IR pulse/space events
3 * Copyright (C) 2010 by Mauro Carvalho Chehab
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation version 2 of the License.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
15 #include <linux/export.h>
16 #include <linux/kthread.h>
17 #include <linux/mutex.h>
18 #include <linux/kmod.h>
19 #include <linux/sched.h>
20 #include <linux/freezer.h>
21 #include "rc-core-priv.h"
23 /* Define the max number of pulse/space transitions to buffer */
24 #define MAX_IR_EVENT_SIZE 512
26 /* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
27 static LIST_HEAD(ir_raw_client_list);
29 /* Used to handle IR raw handler extensions */
30 static DEFINE_MUTEX(ir_raw_handler_lock);
31 static LIST_HEAD(ir_raw_handler_list);
/*
 * Bitmask of protocols supported by the currently registered handlers;
 * OR-accumulated in ir_raw_handler_register() and cleared again in
 * ir_raw_handler_unregister(), always under ir_raw_handler_lock.
 */
32 static u64 available_protocols;
/*
 * ir_raw_event_thread() - per-device kthread that drains buffered raw IR
 * events from the kfifo and feeds each one to every registered protocol
 * decoder (handler->decode) under ir_raw_handler_lock.
 *
 * NOTE(review): this chunk appears to be missing source lines (no opening
 * brace, no declaration of 'retval', no schedule()/continue after the
 * "fifo empty" branch, no closing braces or final return) — confirm
 * against the full upstream rc-ir-raw.c before building.
 */
34 static int ir_raw_event_thread(void *data)
36 struct ir_raw_event ev;
37 struct ir_raw_handler *handler;
38 struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;
41 while (!kthread_should_stop()) {
/* Check the fifo fill level under the producer's spinlock. */
43 spin_lock_irq(&raw->lock);
44 retval = kfifo_len(&raw->kfifo);
46 if (retval < sizeof(ev)) {
/*
 * Not a full event buffered yet: go to sleep until
 * ir_raw_event_handle() wakes us, unless we are being stopped.
 */
47 set_current_state(TASK_INTERRUPTIBLE);
49 if (kthread_should_stop())
50 set_current_state(TASK_RUNNING);
52 spin_unlock_irq(&raw->lock);
/* A full event is available: pop it and run every decoder on it. */
57 retval = kfifo_out(&raw->kfifo, &ev, sizeof(ev));
58 spin_unlock_irq(&raw->lock);
60 mutex_lock(&ir_raw_handler_lock);
61 list_for_each_entry(handler, &ir_raw_handler_list, list)
62 handler->decode(raw->dev, ev);
64 mutex_unlock(&ir_raw_handler_lock);
71 * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
72 * @dev: the struct rc_dev device descriptor
73 * @ev: the struct ir_raw_event descriptor of the pulse/space
75 * This routine (which may be called from an interrupt context) stores a
76 * pulse/space duration for the raw ir decoding state machines. Pulses are
77 * signalled as positive values and spaces as negative values. A zero value
78 * will reset the decoding state machines.
80 int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
/*
 * NOTE(review): lines appear missing here (opening brace, a !dev->raw
 * guard, the error return when the fifo is full, the success return and
 * closing brace) — confirm against upstream.
 */
85 IR_dprintk(2, "sample: (%05dus %s)\n",
86 TO_US(ev->duration), TO_STR(ev->pulse));
/* A short kfifo_in() write means the fifo was full and the event is lost. */
88 if (kfifo_in(&dev->raw->kfifo, ev, sizeof(*ev)) != sizeof(*ev))
93 EXPORT_SYMBOL_GPL(ir_raw_event_store);
96 * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
97 * @dev: the struct rc_dev device descriptor
98 * @type: the type of the event that has occurred
100 * This routine (which may be called from an interrupt context) is used to
101 * store the beginning of an ir pulse or space (or the start/end of ir
102 * reception) for the raw ir decoding state machines. This is used by
103 * hardware which does not provide durations directly but only interrupts
104 * (or similar events) on state change.
106 int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
/*
 * NOTE(review): this chunk appears to be missing lines (opening brace,
 * declarations of 'now'/'delta'/'delay'/'rc', the ktime_get_real() call,
 * the ev.duration/ev.pulse assignments inside the branches, and the
 * final return) — confirm against upstream.
 */
110 DEFINE_IR_RAW_EVENT(ev);
/* Nanoseconds elapsed since the previous recorded edge. */
118 delta = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
119 delay = MS_TO_NS(dev->input_dev->rep[REP_DELAY]);
121 /* Check for a long duration since last event or if we're
122 * being called for the first time, note that delta can't
123 * possibly be negative.
 */
125 if (delta > delay || !dev->raw->last_type)
126 type |= IR_START_EVENT;
/* A start event resets the decoders; otherwise store the just-ended
 * space or pulse whose duration is the measured delta. */
130 if (type & IR_START_EVENT)
131 ir_raw_event_reset(dev);
132 else if (dev->raw->last_type & IR_SPACE) {
134 rc = ir_raw_event_store(dev, &ev);
135 } else if (dev->raw->last_type & IR_PULSE) {
137 rc = ir_raw_event_store(dev, &ev);
/* Remember this edge so the next call can compute its duration. */
141 dev->raw->last_event = now;
142 dev->raw->last_type = type;
145 EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
148 * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
149 * @dev: the struct rc_dev device descriptor
150 * @type: the type of the event that has occurred
152 * This routine (which may be called from an interrupt context) works
153 * in similar manner to ir_raw_event_store_edge.
154 * This routine is intended for devices with limited internal buffer
155 * It automerges samples of same type, and handles timeouts. Returns non-zero
156 * if the event was added, and zero if the event was ignored due to idle
159 int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
/*
 * NOTE(review): lines appear missing (opening brace, a !dev->raw guard,
 * the "return 0" for ignored spaces, and the final return value) —
 * confirm against upstream.
 */
164 /* Ignore spaces in idle mode */
165 if (dev->idle && !ev->pulse)
168 ir_raw_event_set_idle(dev, false);
/*
 * Merge consecutive samples of the same polarity into one event;
 * on a polarity change, flush the accumulated event and start a new one.
 */
170 if (!dev->raw->this_ev.duration)
171 dev->raw->this_ev = *ev;
172 else if (ev->pulse == dev->raw->this_ev.pulse)
173 dev->raw->this_ev.duration += ev->duration;
175 ir_raw_event_store(dev, &dev->raw->this_ev);
176 dev->raw->this_ev = *ev;
179 /* Enter idle mode if necessary */
180 if (!ev->pulse && dev->timeout &&
181 dev->raw->this_ev.duration >= dev->timeout)
182 ir_raw_event_set_idle(dev, true);
186 EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
189 * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not
190 * @dev: the struct rc_dev device descriptor
191 * @idle: whether the device is idle or not
193 void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
/*
 * NOTE(review): guards appear to be missing from this chunk (likely a
 * !dev->raw early return, an "if (idle)" around the flush below, an
 * "if (dev->s_idle)" check, and the dev->idle assignment) — confirm
 * against upstream.
 */
198 IR_dprintk(2, "%s idle mode\n", idle ? "enter" : "leave");
/* On entering idle: mark the merged event as a timeout, flush it to the
 * decoders, and reset the merge buffer. */
201 dev->raw->this_ev.timeout = true;
202 ir_raw_event_store(dev, &dev->raw->this_ev);
203 init_ir_raw_event(&dev->raw->this_ev);
/* Forward the idle hint to the driver, which may park its hardware. */
207 dev->s_idle(dev, idle);
211 EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
214 * ir_raw_event_handle() - schedules the decoding of stored ir data
215 * @dev: the struct rc_dev device descriptor
217 * This routine will tell rc-core to start decoding stored ir data.
219 void ir_raw_event_handle(struct rc_dev *dev)
/*
 * NOTE(review): the opening brace, an 'unsigned long flags' declaration,
 * a !dev->raw guard and the closing brace appear to have been dropped
 * from this chunk — confirm against upstream.
 */
/* Wake the per-device decoder thread; the lock serializes against the
 * thread's sleep/wake check in ir_raw_event_thread(). */
226 spin_lock_irqsave(&dev->raw->lock, flags);
227 wake_up_process(dev->raw->thread);
228 spin_unlock_irqrestore(&dev->raw->lock, flags);
230 EXPORT_SYMBOL_GPL(ir_raw_event_handle);
232 /* used internally by the sysfs interface */
/*
 * Returns a snapshot of the protocols currently decodable, taken under
 * ir_raw_handler_lock.
 *
 * NOTE(review): the return type line (presumably u64), the 'protocols'
 * local declaration, the 'return protocols;' statement and the braces
 * appear to be missing from this chunk — confirm against upstream.
 */
234 ir_raw_get_allowed_protocols(void)
237 mutex_lock(&ir_raw_handler_lock);
238 protocols = available_protocols;
239 mutex_unlock(&ir_raw_handler_lock);
/*
 * Default change_protocol callback installed for raw IR devices: a no-op,
 * since for raw decoding the enabled-protocol mask is handled by rc-core.
 *
 * NOTE(review): the braces and 'return 0;' appear to be missing from
 * this chunk — confirm against upstream.
 */
243 static int change_protocol(struct rc_dev *dev, u64 *rc_type)
245 /* the caller will update dev->enabled_protocols */
250 * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
251 * @ev: Pointer to pointer to next free event. *@ev is incremented for
252 * each raw event filled.
253 * @max: Maximum number of raw events to fill.
254 * @timings: Manchester modulation timings.
255 * @n: Number of bits of data.
256 * @data: Data bits to encode.
258 * Encodes the @n least significant bits of @data using Manchester (bi-phase)
259 * modulation with the timing characteristics described by @timings, writing up
260 * to @max raw IR events using the *@ev pointer.
262 * Returns: 0 on success.
263 * -ENOBUFS if there isn't enough space in the array to fit the
264 * full encoded data. In this case all @max events will have been
/*
 * NOTE(review): this chunk is missing many lines (opening brace, the
 * 'i' bit-mask and 'need_pulse' declarations, the per-bit loop header,
 * the buffer-space checks against @max, several else branches, the
 * closing braces and the return statements) — the comments below
 * describe only what the visible lines show; confirm against upstream.
 */
267 int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
268 const struct ir_raw_timings_manchester *timings,
269 unsigned int n, unsigned int data)
/* Optional leader: either a single pulse followed by a space of the same
 * length, or a pulse alone, depending on pulse_space_start. */
277 if (timings->leader) {
280 if (timings->pulse_space_start) {
281 init_ir_raw_event_duration((*ev)++, 1, timings->leader);
285 init_ir_raw_event_duration((*ev), 0, timings->leader);
287 init_ir_raw_event_duration((*ev), 1, timings->leader);
291 /* continue existing signal */
294 /* from here on *ev will point to the last event rather than the next */
/* Each data bit becomes a half-clock of one polarity followed by a
 * half-clock of the other; invert_level flips the mapping. */
297 need_pulse = !(data & i);
299 need_pulse = !need_pulse;
/* Same polarity as the last event: just extend its duration by one
 * clock instead of emitting a new event. */
300 if (need_pulse == !!(*ev)->pulse) {
301 (*ev)->duration += timings->clock;
305 init_ir_raw_event_duration(++(*ev), need_pulse,
311 init_ir_raw_event_duration(++(*ev), !need_pulse,
/* Optional trailer space: extend a trailing space, or append one. */
316 if (timings->trailer_space) {
318 (*ev)->duration += timings->trailer_space;
322 init_ir_raw_event_duration(++(*ev), 0,
323 timings->trailer_space);
328 /* point to the next event rather than last event before returning */
332 EXPORT_SYMBOL(ir_raw_gen_manchester);
335 * ir_raw_encode_scancode() - Encode a scancode as raw events
337 * @protocols: permitted protocols
338 * @scancode: scancode filter describing a single scancode
339 * @events: array of raw events to write into
340 * @max: max number of raw events
342 * Attempts to encode the scancode as raw events.
344 * Returns: The number of events written.
345 * -ENOBUFS if there isn't enough space in the array to fit the
346 * encoding. In this case all @max events will have been written.
347 * -EINVAL if the scancode is ambiguous or invalid, or if no
348 * compatible encoder was found.
350 int ir_raw_encode_scancode(u64 protocols,
351 const struct rc_scancode_filter *scancode,
352 struct ir_raw_event *events, unsigned int max)
/*
 * NOTE(review): the opening brace, the 'ret' declaration (presumably
 * initialized to -EINVAL), a 'break' after a successful encode, the
 * 'return ret;' and closing braces appear to be missing — confirm
 * against upstream.
 */
354 struct ir_raw_handler *handler;
/* Try each registered handler that both matches one of the permitted
 * protocols and implements an encoder; first success (or -ENOBUFS) wins. */
357 mutex_lock(&ir_raw_handler_lock);
358 list_for_each_entry(handler, &ir_raw_handler_list, list) {
359 if (handler->protocols & protocols && handler->encode) {
360 ret = handler->encode(protocols, scancode, events, max);
361 if (ret >= 0 || ret == -ENOBUFS)
365 mutex_unlock(&ir_raw_handler_lock);
369 EXPORT_SYMBOL(ir_raw_encode_scancode);
372 * Used to (un)register raw event clients
/*
 * ir_raw_event_register() - set up raw IR decoding for @dev: allocates the
 * raw control structure and kfifo, starts the per-device decoder thread,
 * adds the device to ir_raw_client_list and notifies every handler's
 * raw_register callback.
 *
 * NOTE(review): error-handling lines appear missing from this chunk
 * (braces, 'int rc', the -ENOMEM return after kzalloc, kfifo_alloc
 * failure handling, the goto-cleanup labels and final 'return 0') —
 * confirm against upstream.
 */
374 int ir_raw_event_register(struct rc_dev *dev)
377 struct ir_raw_handler *handler;
382 dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
/* Raw devices get the default no-op protocol-change callback. */
387 dev->change_protocol = change_protocol;
388 rc = kfifo_alloc(&dev->raw->kfifo,
389 sizeof(struct ir_raw_event) * MAX_IR_EVENT_SIZE,
394 spin_lock_init(&dev->raw->lock);
395 dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
396 "rc%ld", dev->devno)
398 if (IS_ERR(dev->raw->thread)) {
399 rc = PTR_ERR(dev->raw->thread);
/* Announce the new client to all registered decoders. */
403 mutex_lock(&ir_raw_handler_lock);
404 list_add_tail(&dev->raw->list, &ir_raw_client_list);
405 list_for_each_entry(handler, &ir_raw_handler_list, list)
406 if (handler->raw_register)
407 handler->raw_register(dev);
408 mutex_unlock(&ir_raw_handler_lock);
/*
 * ir_raw_event_unregister() - tear down raw IR decoding for @dev: stops
 * the decoder thread, removes the device from ir_raw_client_list,
 * notifies every handler's raw_unregister callback and frees the fifo.
 *
 * NOTE(review): lines appear missing (braces, the early 'return;', the
 * kfree(dev->raw) / dev->raw = NULL cleanup at the end) — confirm
 * against upstream.
 */
418 void ir_raw_event_unregister(struct rc_dev *dev)
420 struct ir_raw_handler *handler;
422 if (!dev || !dev->raw)
425 kthread_stop(dev->raw->thread);
427 mutex_lock(&ir_raw_handler_lock);
428 list_del(&dev->raw->list);
429 list_for_each_entry(handler, &ir_raw_handler_list, list)
430 if (handler->raw_unregister)
431 handler->raw_unregister(dev);
432 mutex_unlock(&ir_raw_handler_lock);
434 kfifo_free(&dev->raw->kfifo);
440 * Extension interface - used to register the IR decoders
/*
 * ir_raw_handler_register() - add a protocol decoder to the handler list,
 * invoke its raw_register callback for every already-registered client,
 * and publish its protocols in available_protocols. All under
 * ir_raw_handler_lock.
 *
 * NOTE(review): braces and the 'return 0;' appear to be missing from
 * this chunk — confirm against upstream.
 */
443 int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
445 struct ir_raw_event_ctrl *raw;
447 mutex_lock(&ir_raw_handler_lock);
448 list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
449 if (ir_raw_handler->raw_register)
450 list_for_each_entry(raw, &ir_raw_client_list, list)
451 ir_raw_handler->raw_register(raw->dev);
452 available_protocols |= ir_raw_handler->protocols;
453 mutex_unlock(&ir_raw_handler_lock);
457 EXPORT_SYMBOL(ir_raw_handler_register);
/*
 * ir_raw_handler_unregister() - mirror of ir_raw_handler_register():
 * remove the decoder from the handler list, invoke its raw_unregister
 * callback for every registered client, and withdraw its protocols from
 * available_protocols. All under ir_raw_handler_lock.
 *
 * NOTE(review): braces appear to be missing from this chunk — confirm
 * against upstream.
 */
459 void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
461 struct ir_raw_event_ctrl *raw;
463 mutex_lock(&ir_raw_handler_lock);
464 list_del(&ir_raw_handler->list);
465 if (ir_raw_handler->raw_unregister)
466 list_for_each_entry(raw, &ir_raw_client_list, list)
467 ir_raw_handler->raw_unregister(raw->dev);
468 available_protocols &= ~ir_raw_handler->protocols;
469 mutex_unlock(&ir_raw_handler_lock);
471 EXPORT_SYMBOL(ir_raw_handler_unregister);
/*
 * ir_raw_init() - request loading of the protocol decoder modules at
 * rc-core init time.
 *
 * NOTE(review): most of this function is missing from the chunk (braces
 * and the other load_*_decode() calls besides load_mce_kbd_decode(), plus
 * whatever follows the trailing comment) — confirm against upstream.
 */
473 void ir_raw_init(void)
475 /* Load the decoder modules */
484 load_mce_kbd_decode();
488 /* If needed, we may later add some init code. In this case,
489 it is needed to change the CONFIG_MODULE test at rc-core.h