// SPDX-License-Identifier: GPL-2.0
// rc-ir-raw.c - handle IR pulse/space events
//
// Copyright (C) 2010 by Mauro Carvalho Chehab

#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include "rc-core-priv.h"

/* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
static LIST_HEAD(ir_raw_client_list);

/* Used to handle IR raw handler extensions */
DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
static atomic64_t available_protocols = ATOMIC64_INIT(0);

static int ir_raw_event_thread(void *data)
{
	struct ir_raw_event ev;
	struct ir_raw_handler *handler;
	struct ir_raw_event_ctrl *raw = data;
	struct rc_dev *dev = raw->dev;

	while (1) {
		mutex_lock(&ir_raw_handler_lock);
		while (kfifo_out(&raw->kfifo, &ev, 1)) {
			if (is_timing_event(ev)) {
				if (ev.duration == 0)
					dev_warn_once(&dev->dev, "nonsensical timing event of duration 0");
				if (is_timing_event(raw->prev_ev) &&
				    !is_transition(&ev, &raw->prev_ev))
					dev_warn_once(&dev->dev, "two consecutive events of type %s",
						      TO_STR(ev.pulse));
				if (raw->prev_ev.reset && ev.pulse == 0)
					dev_warn_once(&dev->dev, "timing event after reset should be pulse");
			}
			list_for_each_entry(handler, &ir_raw_handler_list, list)
				if (dev->enabled_protocols &
				    handler->protocols || !handler->protocols)
					handler->decode(dev, ev);
			lirc_raw_event(dev, ev);
			raw->prev_ev = ev;
		}
		mutex_unlock(&ir_raw_handler_lock);

		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		} else if (!kfifo_is_empty(&raw->kfifo))
			set_current_state(TASK_RUNNING);

		schedule();
	}

	return 0;
}

/**
 * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
 * @dev: the struct rc_dev device descriptor
 * @ev: the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines. Pulses are
 * signalled with @ev->pulse set and spaces with it cleared. A reset event
 * restarts the decoding state machines.
 */
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	dev_dbg(&dev->dev, "sample: (%05dus %s)\n",
		ev->duration, TO_STR(ev->pulse));

	if (!kfifo_put(&dev->raw->kfifo, *ev)) {
		dev_err(&dev->dev, "IR event FIFO is full!\n");
		return -ENOSPC;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store);
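
/*
 * Usage sketch (illustrative only, excluded from the build): a driver whose
 * hardware reports complete pulse/space durations could feed the decoders
 * from its interrupt handler roughly like this. example_hw_read_sample(),
 * EXAMPLE_PULSE and EXAMPLE_DURATION_MASK are hypothetical stand-ins for
 * real register accessors; a real driver also includes <linux/interrupt.h>.
 */
#if 0
static irqreturn_t example_store_isr(int irq, void *data)
{
	struct rc_dev *rcdev = data;
	struct ir_raw_event ev = {};
	u32 sample = example_hw_read_sample(rcdev);	/* hypothetical */

	ev.pulse = !!(sample & EXAMPLE_PULSE);		/* mark vs. space */
	ev.duration = sample & EXAMPLE_DURATION_MASK;	/* microseconds */

	ir_raw_event_store(rcdev, &ev);
	ir_raw_event_handle(rcdev);	/* wake the decoding thread */

	return IRQ_HANDLED;
}
#endif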

/**
 * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
 * @dev: the struct rc_dev device descriptor
 * @pulse: true for pulse, false for space
 *
 * This routine (which may be called from an interrupt context) is used to
 * store the beginning of an ir pulse or space (or the start/end of ir
 * reception) for the raw ir decoding state machines. This is used by
 * hardware which does not provide durations directly but only interrupts
 * (or similar events) on state change.
 */
int ir_raw_event_store_edge(struct rc_dev *dev, bool pulse)
{
	ktime_t now;
	struct ir_raw_event ev = {};

	if (!dev->raw)
		return -EINVAL;

	now = ktime_get();
	ev.duration = ktime_to_us(ktime_sub(now, dev->raw->last_event));
	ev.pulse = !pulse;

	return ir_raw_event_store_with_timeout(dev, &ev);
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
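
/*
 * Usage sketch (illustrative only, excluded from the build): hardware that
 * only interrupts on signal transitions can report the new line level and
 * let rc-core compute the duration of the interval that just ended. This
 * mirrors what the gpio-ir-recv driver does; struct example_ir is a
 * hypothetical driver-private structure.
 */
#if 0
static irqreturn_t example_edge_isr(int irq, void *data)
{
	struct example_ir *ir = data;

	/* the current level says whether a pulse or a space just began */
	ir_raw_event_store_edge(ir->rcdev, gpiod_get_value(ir->gpiod));

	return IRQ_HANDLED;
}
#endif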

/*
 * ir_raw_event_store_with_timeout() - pass a pulse/space duration to the raw
 *				       ir decoders, schedule decoding and
 *				       timeout
 * @dev: the struct rc_dev device descriptor
 * @ev: the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines, schedules
 * decoding and generates a timeout.
 */
int ir_raw_event_store_with_timeout(struct rc_dev *dev, struct ir_raw_event *ev)
{
	ktime_t now;
	int rc = 0;

	if (!dev->raw)
		return -EINVAL;

	now = ktime_get();

	spin_lock(&dev->raw->edge_spinlock);
	rc = ir_raw_event_store(dev, ev);

	dev->raw->last_event = now;

	/* timer could be set to timeout (125ms by default) */
	if (!timer_pending(&dev->raw->edge_handle) ||
	    time_after(dev->raw->edge_handle.expires,
		       jiffies + msecs_to_jiffies(15))) {
		mod_timer(&dev->raw->edge_handle,
			  jiffies + msecs_to_jiffies(15));
	}
	spin_unlock(&dev->raw->edge_spinlock);

	return rc;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_timeout);

/**
 * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
 * @dev: the struct rc_dev device descriptor
 * @ev: the event that has occurred
 *
 * This routine (which may be called from an interrupt context) works in a
 * similar manner to ir_raw_event_store_edge(). It is intended for devices
 * with a limited internal buffer: it automerges consecutive samples of the
 * same type and handles timeouts. Returns non-zero if the event was added,
 * and zero if the event was ignored due to idle processing.
 */
int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	/* Ignore spaces in idle mode */
	if (dev->idle && !ev->pulse)
		return 0;
	else if (dev->idle)
		ir_raw_event_set_idle(dev, false);

	if (!dev->raw->this_ev.duration)
		dev->raw->this_ev = *ev;
	else if (ev->pulse == dev->raw->this_ev.pulse)
		dev->raw->this_ev.duration += ev->duration;
	else {
		ir_raw_event_store(dev, &dev->raw->this_ev);
		dev->raw->this_ev = *ev;
	}

	/* Enter idle mode if necessary */
	if (!ev->pulse && dev->timeout &&
	    dev->raw->this_ev.duration >= dev->timeout)
		ir_raw_event_set_idle(dev, true);

	return 1;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
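
/*
 * Usage sketch (illustrative only, excluded from the build): a driver
 * draining a small hardware FIFO can push every sample through the filter
 * and let rc-core merge same-type samples and detect the end of a
 * transmission. The example_hw_fifo_pop() accessor is hypothetical.
 */
#if 0
static void example_drain_fifo(struct rc_dev *rcdev)
{
	bool pulse;
	u32 duration;

	while (example_hw_fifo_pop(rcdev, &pulse, &duration)) {
		struct ir_raw_event ev = {
			.pulse = pulse,
			.duration = duration
		};

		ir_raw_event_store_with_filter(rcdev, &ev);
	}
	ir_raw_event_handle(rcdev);
}
#endif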

/**
 * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not
 * @dev: the struct rc_dev device descriptor
 * @idle: whether the device is idle or not
 */
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
{
	if (!dev->raw)
		return;

	dev_dbg(&dev->dev, "%s idle mode\n", idle ? "enter" : "leave");

	if (idle) {
		dev->raw->this_ev.timeout = true;
		ir_raw_event_store(dev, &dev->raw->this_ev);
		dev->raw->this_ev = (struct ir_raw_event) {};
	}

	if (dev->s_idle)
		dev->s_idle(dev, idle);

	dev->idle = idle;
}
EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);

/**
 * ir_raw_event_handle() - schedules the decoding of stored ir data
 * @dev: the struct rc_dev device descriptor
 *
 * This routine will tell rc-core to start decoding stored ir data.
 */
void ir_raw_event_handle(struct rc_dev *dev)
{
	if (!dev->raw || !dev->raw->thread)
		return;

	wake_up_process(dev->raw->thread);
}
EXPORT_SYMBOL_GPL(ir_raw_event_handle);

/* used internally by the sysfs interface */
u64
ir_raw_get_allowed_protocols(void)
{
	return atomic64_read(&available_protocols);
}

static int change_protocol(struct rc_dev *dev, u64 *rc_proto)
{
	struct ir_raw_handler *handler;
	u32 timeout = 0;

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (!(dev->enabled_protocols & handler->protocols) &&
		    (*rc_proto & handler->protocols) && handler->raw_register)
			handler->raw_register(dev);

		if ((dev->enabled_protocols & handler->protocols) &&
		    !(*rc_proto & handler->protocols) &&
		    handler->raw_unregister)
			handler->raw_unregister(dev);
	}
	mutex_unlock(&ir_raw_handler_lock);

	if (!dev->max_timeout)
		return 0;

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (handler->protocols & *rc_proto) {
			if (timeout < handler->min_timeout)
				timeout = handler->min_timeout;
		}
	}
	mutex_unlock(&ir_raw_handler_lock);

	if (timeout == 0)
		timeout = IR_DEFAULT_TIMEOUT;
	else
		timeout += MS_TO_US(10);

	if (timeout < dev->min_timeout)
		timeout = dev->min_timeout;
	else if (timeout > dev->max_timeout)
		timeout = dev->max_timeout;

	if (dev->s_timeout)
		dev->s_timeout(dev, timeout);
	else
		dev->timeout = timeout;

	return 0;
}

static void ir_raw_disable_protocols(struct rc_dev *dev, u64 protocols)
{
	mutex_lock(&dev->lock);
	dev->enabled_protocols &= ~protocols;
	mutex_unlock(&dev->lock);
}

/**
 * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
 * @ev: Pointer to pointer to next free event. *@ev is incremented for
 *      each raw event filled.
 * @max: Maximum number of raw events to fill.
 * @timings: Manchester modulation timings.
 * @n: Number of bits of data.
 * @data: Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using Manchester (bi-phase)
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns: 0 on success.
 *          -ENOBUFS if there isn't enough space in the array to fit the
 *          full encoded data. In this case all @max events will have been
 *          written.
 */
int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
			  const struct ir_raw_timings_manchester *timings,
			  unsigned int n, u64 data)
{
	bool need_pulse;
	u64 i;
	int ret = -ENOBUFS;

	i = BIT_ULL(n - 1);

	if (timings->leader_pulse) {
		if (!max--)
			return ret;
		init_ir_raw_event_duration((*ev), 1, timings->leader_pulse);
		if (timings->leader_space) {
			if (!max--)
				return ret;
			init_ir_raw_event_duration(++(*ev), 0,
						   timings->leader_space);
		}
	} else {
		/* continue existing signal */
		--(*ev);
	}
	/* from here on *ev will point to the last event rather than the next */

	while (n && i > 0) {
		need_pulse = !(data & i);
		if (timings->invert)
			need_pulse = !need_pulse;
		if (need_pulse == !!(*ev)->pulse) {
			(*ev)->duration += timings->clock;
		} else {
			if (!max--)
				goto nobufs;
			init_ir_raw_event_duration(++(*ev), need_pulse,
						   timings->clock);
		}

		if (!max--)
			goto nobufs;
		init_ir_raw_event_duration(++(*ev), !need_pulse,
					   timings->clock);
		i >>= 1;
	}

	if (timings->trailer_space) {
		if (!(*ev)->pulse)
			(*ev)->duration += timings->trailer_space;
		else if (!max--)
			goto nobufs;
		else
			init_ir_raw_event_duration(++(*ev), 0,
						   timings->trailer_space);
	}

	ret = 0;
nobufs:
	/* point to the next event rather than last event before returning */
	++(*ev);
	return ret;
}
EXPORT_SYMBOL(ir_raw_gen_manchester);
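
/*
 * Usage sketch (illustrative only, excluded from the build): encoding a
 * 14-bit RC-5-style frame. The 889 us half-bit clock matches the timings
 * the RC-5 encoder uses; buffer and frame value come from the caller.
 */
#if 0
static int example_gen_manchester(u16 rc5_frame, struct ir_raw_event *buf,
				  unsigned int max)
{
	static const struct ir_raw_timings_manchester timings = {
		.leader_pulse	= 889,		/* first half of the start bit */
		.clock		= 889,		/* half-bit period, in us */
		.trailer_space	= 889 * 10,	/* gap after the frame */
	};
	struct ir_raw_event *e = buf;
	int ret;

	ret = ir_raw_gen_manchester(&e, max, &timings, 14, rc5_frame);

	return ret ? ret : e - buf;	/* events written on success */
}
#endif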

/**
 * ir_raw_gen_pd() - Encode data to raw events with pulse-distance modulation.
 * @ev: Pointer to pointer to next free event. *@ev is incremented for
 *      each raw event filled.
 * @max: Maximum number of raw events to fill.
 * @timings: Pulse distance modulation timings.
 * @n: Number of bits of data.
 * @data: Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using pulse-distance
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns: 0 on success.
 *          -ENOBUFS if there isn't enough space in the array to fit the
 *          full encoded data. In this case all @max events will have been
 *          written.
 */
int ir_raw_gen_pd(struct ir_raw_event **ev, unsigned int max,
		  const struct ir_raw_timings_pd *timings,
		  unsigned int n, u64 data)
{
	int i;
	int ret;
	unsigned int space;

	if (timings->header_pulse) {
		ret = ir_raw_gen_pulse_space(ev, &max, timings->header_pulse,
					     timings->header_space);
		if (ret)
			return ret;
	}

	if (timings->msb_first) {
		for (i = n - 1; i >= 0; --i) {
			space = timings->bit_space[(data >> i) & 1];
			ret = ir_raw_gen_pulse_space(ev, &max,
						     timings->bit_pulse,
						     space);
			if (ret)
				return ret;
		}
	} else {
		for (i = 0; i < n; ++i, data >>= 1) {
			space = timings->bit_space[data & 1];
			ret = ir_raw_gen_pulse_space(ev, &max,
						     timings->bit_pulse,
						     space);
			if (ret)
				return ret;
		}
	}

	ret = ir_raw_gen_pulse_space(ev, &max, timings->trailer_pulse,
				     timings->trailer_space);
	return ret;
}
EXPORT_SYMBOL(ir_raw_gen_pd);
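
/*
 * Usage sketch (illustrative only, excluded from the build): NEC-style
 * pulse-distance timings. The bit value lives in the space length: a 0 bit
 * is a 560 us pulse plus a 560 us space, a 1 bit a 560 us pulse plus a
 * 1690 us space. The values are the commonly quoted approximations.
 */
#if 0
static int example_gen_pd(u32 raw_nec, struct ir_raw_event *buf,
			  unsigned int max)
{
	static const struct ir_raw_timings_pd timings = {
		.header_pulse	= 9000,
		.header_space	= 4500,
		.bit_pulse	= 560,
		.bit_space[0]	= 560,
		.bit_space[1]	= 1690,
		.trailer_pulse	= 560,
		.trailer_space	= 5600,
		.msb_first	= 0,	/* NEC sends lsb first */
	};
	struct ir_raw_event *e = buf;
	int ret;

	ret = ir_raw_gen_pd(&e, max, &timings, 32, raw_nec);

	return ret ? ret : e - buf;
}
#endif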

/**
 * ir_raw_gen_pl() - Encode data to raw events with pulse-length modulation.
 * @ev: Pointer to pointer to next free event. *@ev is incremented for
 *      each raw event filled.
 * @max: Maximum number of raw events to fill.
 * @timings: Pulse length modulation timings.
 * @n: Number of bits of data.
 * @data: Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using pulse-length
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns: 0 on success.
 *          -ENOBUFS if there isn't enough space in the array to fit the
 *          full encoded data. In this case all @max events will have been
 *          written.
 */
int ir_raw_gen_pl(struct ir_raw_event **ev, unsigned int max,
		  const struct ir_raw_timings_pl *timings,
		  unsigned int n, u64 data)
{
	int i;
	int ret = -ENOBUFS;
	unsigned int pulse;

	if (!max--)
		return ret;

	init_ir_raw_event_duration((*ev)++, 1, timings->header_pulse);

	if (timings->msb_first) {
		for (i = n - 1; i >= 0; --i) {
			if (!max--)
				return ret;
			init_ir_raw_event_duration((*ev)++, 0,
						   timings->bit_space);
			if (!max--)
				return ret;
			pulse = timings->bit_pulse[(data >> i) & 1];
			init_ir_raw_event_duration((*ev)++, 1, pulse);
		}
	} else {
		for (i = 0; i < n; ++i, data >>= 1) {
			if (!max--)
				return ret;
			init_ir_raw_event_duration((*ev)++, 0,
						   timings->bit_space);
			if (!max--)
				return ret;
			pulse = timings->bit_pulse[data & 1];
			init_ir_raw_event_duration((*ev)++, 1, pulse);
		}
	}

	if (!max--)
		return ret;

	init_ir_raw_event_duration((*ev)++, 0, timings->trailer_space);

	return 0;
}
EXPORT_SYMBOL(ir_raw_gen_pl);
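
/*
 * Usage sketch (illustrative only, excluded from the build): Sony
 * SIRC-style pulse-length timings. Here the bit value lives in the pulse
 * width: 600 us for a 0 bit, 1200 us for a 1 bit, with a fixed 600 us
 * space between bits. The trailer value is an approximate frame gap.
 */
#if 0
static int example_gen_pl(u16 raw_sirc, struct ir_raw_event *buf,
			  unsigned int max)
{
	static const struct ir_raw_timings_pl timings = {
		.header_pulse	= 2400,
		.bit_space	= 600,
		.bit_pulse[0]	= 600,
		.bit_pulse[1]	= 1200,
		.trailer_space	= 10000,
		.msb_first	= 0,	/* SIRC sends lsb first */
	};
	struct ir_raw_event *e = buf;
	int ret;

	ret = ir_raw_gen_pl(&e, max, &timings, 12, raw_sirc);

	return ret ? ret : e - buf;
}
#endif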

/**
 * ir_raw_encode_scancode() - Encode a scancode as raw events
 *
 * @protocol: protocol
 * @scancode: scancode filter describing a single scancode
 * @events: array of raw events to write into
 * @max: max number of raw events
 *
 * Attempts to encode the scancode as raw events.
 *
 * Returns: The number of events written.
 *          -ENOBUFS if there isn't enough space in the array to fit the
 *          encoding. In this case all @max events will have been written.
 *          -EINVAL if the scancode is ambiguous or invalid, or if no
 *          compatible encoder was found.
 */
int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode,
			   struct ir_raw_event *events, unsigned int max)
{
	struct ir_raw_handler *handler;
	int ret = -EINVAL;
	u64 mask = 1ULL << protocol;

	ir_raw_load_modules(&mask);

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (handler->protocols & mask && handler->encode) {
			ret = handler->encode(protocol, scancode, events, max);
			if (ret >= 0 || ret == -ENOBUFS)
				break;
		}
	}
	mutex_unlock(&ir_raw_handler_lock);

	return ret;
}
EXPORT_SYMBOL(ir_raw_encode_scancode);
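
/*
 * Usage sketch (illustrative only, excluded from the build): a transmit
 * path can turn a scancode into raw events and hand them to the hardware.
 * example_tx_ir() is a hypothetical driver hook that consumes the encoded
 * events.
 */
#if 0
static int example_tx_scancode(struct rc_dev *rcdev, u32 scancode)
{
	struct ir_raw_event events[64];
	int count;

	count = ir_raw_encode_scancode(RC_PROTO_NEC, scancode, events,
				       ARRAY_SIZE(events));
	if (count < 0)
		return count;	/* -EINVAL or -ENOBUFS */

	return example_tx_ir(rcdev, events, count);	/* hypothetical */
}
#endif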

/**
 * ir_raw_edge_handle() - Handle ir_raw_event_store_edge() processing
 *
 * @t: timer_list
 *
 * This callback is armed by ir_raw_event_store_edge(). It does two things:
 * first, rather than calling ir_raw_event_handle() for each edge and waking
 * up the rc thread, it calls ir_raw_event_handle() once, 15 ms after the
 * first edge. Second, it generates a timeout event if no more IR is
 * received after the rc_dev timeout.
 */
static void ir_raw_edge_handle(struct timer_list *t)
{
	struct ir_raw_event_ctrl *raw = from_timer(raw, t, edge_handle);
	struct rc_dev *dev = raw->dev;
	unsigned long flags;
	ktime_t interval;

	spin_lock_irqsave(&dev->raw->edge_spinlock, flags);
	interval = ktime_sub(ktime_get(), dev->raw->last_event);
	if (ktime_to_us(interval) >= dev->timeout) {
		struct ir_raw_event ev = {
			.timeout = true,
			.duration = ktime_to_us(interval)
		};

		ir_raw_event_store(dev, &ev);
	} else {
		mod_timer(&dev->raw->edge_handle,
			  jiffies + usecs_to_jiffies(dev->timeout -
						     ktime_to_us(interval)));
	}
	spin_unlock_irqrestore(&dev->raw->edge_spinlock, flags);

	ir_raw_event_handle(dev);
}

/**
 * ir_raw_encode_carrier() - Get carrier used for protocol
 *
 * @protocol: protocol
 *
 * Attempts to find the carrier for the specified protocol
 *
 * Returns: The carrier in Hz
 *          -EINVAL if the protocol is invalid, or if no
 *          compatible encoder was found.
 */
int ir_raw_encode_carrier(enum rc_proto protocol)
{
	struct ir_raw_handler *handler;
	int ret = -EINVAL;
	u64 mask = BIT_ULL(protocol);

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (handler->protocols & mask && handler->encode) {
			ret = handler->carrier;
			break;
		}
	}
	mutex_unlock(&ir_raw_handler_lock);

	return ret;
}
EXPORT_SYMBOL(ir_raw_encode_carrier);
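
/*
 * Usage sketch (illustrative only, excluded from the build): before
 * transmitting encoded events, a driver can look up the protocol's nominal
 * carrier and program its modulator. example_set_carrier() is a
 * hypothetical driver hook.
 */
#if 0
static int example_prepare_tx(struct rc_dev *rcdev, enum rc_proto proto)
{
	int carrier = ir_raw_encode_carrier(proto);

	if (carrier < 0)
		return carrier;	/* no encoder loaded for this protocol */

	return example_set_carrier(rcdev, carrier);	/* hypothetical */
}
#endif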

/*
 * Used to (un)register raw event clients
 */
int ir_raw_event_prepare(struct rc_dev *dev)
{
	if (!dev)
		return -EINVAL;

	dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
	if (!dev->raw)
		return -ENOMEM;

	dev->raw->dev = dev;
	dev->change_protocol = change_protocol;
	dev->idle = true;
	spin_lock_init(&dev->raw->edge_spinlock);
	timer_setup(&dev->raw->edge_handle, ir_raw_edge_handle, 0);
	INIT_KFIFO(dev->raw->kfifo);

	return 0;
}

int ir_raw_event_register(struct rc_dev *dev)
{
	struct task_struct *thread;

	thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u", dev->minor);
	if (IS_ERR(thread))
		return PTR_ERR(thread);

	dev->raw->thread = thread;

	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&dev->raw->list, &ir_raw_client_list);
	mutex_unlock(&ir_raw_handler_lock);

	return 0;
}
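
/*
 * Note (informational sketch, excluded from the build): drivers do not call
 * ir_raw_event_prepare()/ir_raw_event_register() directly; they are invoked
 * by rc_register_device() for devices allocated as RC_DRIVER_IR_RAW. A
 * minimal raw IR driver probe therefore looks roughly like this.
 */
#if 0
static int example_probe(struct platform_device *pdev)
{
	struct rc_dev *rcdev;

	rcdev = devm_rc_allocate_device(&pdev->dev, RC_DRIVER_IR_RAW);
	if (!rcdev)
		return -ENOMEM;

	rcdev->device_name = "example";
	rcdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
	rcdev->map_name = RC_MAP_EMPTY;

	return devm_rc_register_device(&pdev->dev, rcdev);
}
#endif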

void ir_raw_event_free(struct rc_dev *dev)
{
	if (!dev)
		return;

	kfree(dev->raw);
	dev->raw = NULL;
}

void ir_raw_event_unregister(struct rc_dev *dev)
{
	struct ir_raw_handler *handler;

	if (!dev || !dev->raw)
		return;

	kthread_stop(dev->raw->thread);
	del_timer_sync(&dev->raw->edge_handle);

	mutex_lock(&ir_raw_handler_lock);
	list_del(&dev->raw->list);
	list_for_each_entry(handler, &ir_raw_handler_list, list)
		if (handler->raw_unregister &&
		    (handler->protocols & dev->enabled_protocols))
			handler->raw_unregister(dev);

	lirc_bpf_free(dev);

	ir_raw_event_free(dev);

	/*
	 * A user can be calling bpf(BPF_PROG_{QUERY|ATTACH|DETACH}), so
	 * ensure that the raw member is null on unlock; this is how
	 * "device gone" is checked.
	 */
	mutex_unlock(&ir_raw_handler_lock);
}

/*
 * Extension interface - used to register the IR decoders
 */

int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
{
	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
	atomic64_or(ir_raw_handler->protocols, &available_protocols);
	mutex_unlock(&ir_raw_handler_lock);

	return 0;
}
EXPORT_SYMBOL(ir_raw_handler_register);

void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
{
	struct ir_raw_event_ctrl *raw;
	u64 protocols = ir_raw_handler->protocols;

	mutex_lock(&ir_raw_handler_lock);
	list_del(&ir_raw_handler->list);
	list_for_each_entry(raw, &ir_raw_client_list, list) {
		if (ir_raw_handler->raw_unregister &&
		    (raw->dev->enabled_protocols & protocols))
			ir_raw_handler->raw_unregister(raw->dev);
		ir_raw_disable_protocols(raw->dev, protocols);
	}
	atomic64_andnot(protocols, &available_protocols);
	mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);