// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Loopback driver for rc-core
 *
 * Copyright (c) 2010 David Härdeman <david@hardeman.nu>
 *
 * This driver receives TX data and passes it back as RX data,
 * which is useful for (scripted) debugging of rc-core without
 * having to use actual hardware.
 */
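
/*
 * Example usage (a sketch, assuming the ir-ctl tool from v4l-utils is
 * installed and that the loopback registers as /dev/lirc0; the node
 * number may differ on a given system):
 *
 *   modprobe rc-loopback debug=1
 *   ir-ctl -d /dev/lirc0 --send=keypress.txt    # pulse/space data to "transmit"
 *   ir-ctl -d /dev/lirc0 --receive              # the same samples come back as RX
 *
 * Anything written to the TX side is fed straight into the raw IR
 * decoders, so decoded scancodes also appear on the associated input device.
 */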

#include <linux/device.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/rc-core.h>

#define DRIVER_NAME	"rc-loopback"
#define dprintk(x...)	if (debug) printk(KERN_INFO DRIVER_NAME ": " x)
#define RXMASK_REGULAR	0x1
#define RXMASK_LEARNING	0x2

static bool debug;

struct loopback_dev {
	struct rc_dev *dev;
	u32 txmask;
	u32 txcarrier;
	u32 txduty;
	bool idle;
	bool learning;
	bool carrierreport;
	u32 rxcarriermin;
	u32 rxcarriermax;
};

static struct loopback_dev loopdev;

static int loop_set_tx_mask(struct rc_dev *dev, u32 mask)
{
	struct loopback_dev *lodev = dev->priv;

	if ((mask & (RXMASK_REGULAR | RXMASK_LEARNING)) != mask) {
		dprintk("invalid tx mask: %u\n", mask);
		return -EINVAL;
	}

	dprintk("setting tx mask: %u\n", mask);
	lodev->txmask = mask;
	return 0;
}

static int loop_set_tx_carrier(struct rc_dev *dev, u32 carrier)
{
	struct loopback_dev *lodev = dev->priv;

	dprintk("setting tx carrier: %u\n", carrier);
	lodev->txcarrier = carrier;
	return 0;
}

static int loop_set_tx_duty_cycle(struct rc_dev *dev, u32 duty_cycle)
{
	struct loopback_dev *lodev = dev->priv;

	if (duty_cycle < 1 || duty_cycle > 99) {
		dprintk("invalid duty cycle: %u\n", duty_cycle);
		return -EINVAL;
	}

	dprintk("setting duty cycle: %u\n", duty_cycle);
	lodev->txduty = duty_cycle;
	return 0;
}

static int loop_set_rx_carrier_range(struct rc_dev *dev, u32 min, u32 max)
{
	struct loopback_dev *lodev = dev->priv;

	if (min < 1 || min > max) {
		dprintk("invalid rx carrier range %u to %u\n", min, max);
		return -EINVAL;
	}

	dprintk("setting rx carrier range %u to %u\n", min, max);
	lodev->rxcarriermin = min;
	lodev->rxcarriermax = max;
	return 0;
}

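/*
 * "Transmit" by feeding the TX samples straight back into the raw IR
 * receive path: the buffer is treated as alternating pulse/space
 * durations, and a trailing space of dev->timeout is appended so the
 * decoders see the end of the transmission.  TX is silently dropped
 * (but still reported as sent) if the carrier falls outside the
 * configured RX window or if the selected transmitter mask does not
 * match the current regular/learning receiver.
 */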
static int loop_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
{
	struct loopback_dev *lodev = dev->priv;
	u32 rxmask;
	unsigned i;
	struct ir_raw_event rawir = {};

	if (lodev->txcarrier < lodev->rxcarriermin ||
	    lodev->txcarrier > lodev->rxcarriermax) {
		dprintk("ignoring tx, carrier out of range\n");
		goto out;
	}

	if (lodev->learning)
		rxmask = RXMASK_LEARNING;
	else
		rxmask = RXMASK_REGULAR;

	if (!(rxmask & lodev->txmask)) {
		dprintk("ignoring tx, rx mask mismatch\n");
		goto out;
	}

	for (i = 0; i < count; i++) {
		rawir.pulse = i % 2 ? false : true;
		rawir.duration = txbuf[i];
		if (rawir.duration)
			ir_raw_event_store_with_filter(dev, &rawir);
	}

	/* Fake a silence long enough to cause us to go idle */
	rawir.pulse = false;
	rawir.duration = dev->timeout;
	ir_raw_event_store_with_filter(dev, &rawir);

	ir_raw_event_handle(dev);

out:
	return count;
}

static void loop_set_idle(struct rc_dev *dev, bool enable)
{
	struct loopback_dev *lodev = dev->priv;

	if (lodev->idle != enable) {
		dprintk("%sing idle mode\n", enable ? "enter" : "exit");
		lodev->idle = enable;
	}
}

static int loop_set_learning_mode(struct rc_dev *dev, int enable)
{
	struct loopback_dev *lodev = dev->priv;

	if (lodev->learning != enable) {
		dprintk("%sing learning mode\n", enable ? "enter" : "exit");
		lodev->learning = !!enable;
	}

	return 0;
}

static int loop_set_carrier_report(struct rc_dev *dev, int enable)
{
	struct loopback_dev *lodev = dev->priv;

	if (lodev->carrierreport != enable) {
		dprintk("%sabling carrier reports\n", enable ? "en" : "dis");
		lodev->carrierreport = !!enable;
	}

	return 0;
}

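/*
 * Wakeup filters cannot really be honoured by a loopback device, so
 * instead encode the requested scancode with the wakeup protocol and
 * loop the resulting raw IR back through the decoders.  This exercises
 * the protocol encoders and lets a test script check that the filter
 * round-trips.
 */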
static int loop_set_wakeup_filter(struct rc_dev *dev,
				  struct rc_scancode_filter *sc)
{
	static const unsigned int max = 512;
	struct ir_raw_event *raw;
	int ret;
	int i;

	/* fine to disable filter */
	if (!sc->mask)
		return 0;

	/* encode the specified filter and loop it back */
	raw = kmalloc_array(max, sizeof(*raw), GFP_KERNEL);
	if (!raw)
		return -ENOMEM;

	ret = ir_raw_encode_scancode(dev->wakeup_protocol, sc->data, raw, max);
	/* still loop back the partial raw IR even if it's incomplete */
	if (ret == -ENOBUFS)
		ret = max;
	if (ret >= 0) {
		/* do the loopback */
		for (i = 0; i < ret; ++i)
			ir_raw_event_store(dev, &raw[i]);
		ir_raw_event_handle(dev);

		ret = 0;
	}

	kfree(raw);

	return ret;
}

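/*
 * A single static loopback device is registered at module load.  The
 * defaults below (36 kHz carrier, 50% duty cycle, regular transmitter
 * mask, effectively unrestricted RX carrier range) are chosen so that
 * TX data is accepted and looped back without any further configuration.
 */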
static int __init loop_init(void)
{
	struct rc_dev *rc;
	int ret;

	rc = rc_allocate_device(RC_DRIVER_IR_RAW);
	if (!rc) {
		printk(KERN_ERR DRIVER_NAME ": rc_dev allocation failed\n");
		return -ENOMEM;
	}

	rc->device_name = "rc-core loopback device";
	rc->input_phys = "rc-core/virtual";
	rc->input_id.bustype = BUS_VIRTUAL;
	rc->input_id.version = 1;
	rc->driver_name = DRIVER_NAME;
	rc->map_name = RC_MAP_EMPTY;
	rc->priv = &loopdev;
	rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
	rc->allowed_wakeup_protocols = RC_PROTO_BIT_ALL_IR_ENCODER;
	rc->encode_wakeup = true;
	rc->timeout = MS_TO_US(100); /* 100 ms */
	rc->min_timeout = 1;
	rc->max_timeout = UINT_MAX;
	rc->rx_resolution = 1;
	rc->tx_resolution = 1;
	rc->s_tx_mask = loop_set_tx_mask;
	rc->s_tx_carrier = loop_set_tx_carrier;
	rc->s_tx_duty_cycle = loop_set_tx_duty_cycle;
	rc->s_rx_carrier_range = loop_set_rx_carrier_range;
	rc->tx_ir = loop_tx_ir;
	rc->s_idle = loop_set_idle;
	rc->s_learning_mode = loop_set_learning_mode;
	rc->s_carrier_report = loop_set_carrier_report;
	rc->s_wakeup_filter = loop_set_wakeup_filter;

	loopdev.txmask = RXMASK_REGULAR;
	loopdev.txcarrier = 36000;
	loopdev.txduty = 50;
	loopdev.rxcarriermin = 1;
	loopdev.rxcarriermax = ~0;
	loopdev.idle = true;
	loopdev.learning = false;
	loopdev.carrierreport = false;

	ret = rc_register_device(rc);
	if (ret < 0) {
		printk(KERN_ERR DRIVER_NAME ": rc_dev registration failed\n");
		rc_free_device(rc);
		return ret;
	}

	loopdev.dev = rc;
	return 0;
}

static void __exit loop_exit(void)
{
	rc_unregister_device(loopdev.dev);
}

module_init(loop_init);
module_exit(loop_exit);

module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable debug messages");

MODULE_DESCRIPTION("Loopback device for rc-core debugging");
MODULE_AUTHOR("David Härdeman <david@hardeman.nu>");
MODULE_LICENSE("GPL");