Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 sources for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * driver for ENE KB3926 B/C/D/E/F CIR (pnp id: ENE0XXX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Copyright (C) 2010 Maxim Levitsky <maximlevitsky@gmail.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  * Special thanks to:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  *   Sami R. <maesesami@gmail.com> for lot of help in debugging and therefore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  *    bringing to life support for transmission & learning mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  *   Charlie Andrews <charliethepilot@googlemail.com> for lots of help in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  *   bringing up the support of new firmware buffer that is popular
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  *   on latest notebooks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  *   ENE for partial device documentation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/pnp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <media/rc-core.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include "ene_ir.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) 
/*
 * Module-parameter backed knobs. The module_param() declarations are
 * presumably near the bottom of the file, outside this chunk — confirm.
 */
static int sample_period;	/* RX sample period; compared against ENE_DEFAULT_SAMPLE_PERIOD below */
static bool learning_mode_force;	/* force learning mode (not referenced in this chunk) */
static int debug;	/* debug verbosity; nonzero also forces carrier detection in ene_rx_setup() */
static bool txsim;	/* TX simulation flag (not referenced in this chunk) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) static void ene_set_reg_addr(struct ene_device *dev, u16 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) 	outb(reg >> 8, dev->hw_io + ENE_ADDR_HI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) 	outb(reg & 0xFF, dev->hw_io + ENE_ADDR_LO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) /* read a hardware register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) static u8 ene_read_reg(struct ene_device *dev, u16 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) 	u8 retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) 	ene_set_reg_addr(dev, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) 	retval = inb(dev->hw_io + ENE_IO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) 	dbg_regs("reg %04x == %02x", reg, retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) /* write a hardware register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) static void ene_write_reg(struct ene_device *dev, u16 reg, u8 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) 	dbg_regs("reg %04x <- %02x", reg, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) 	ene_set_reg_addr(dev, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) 	outb(value, dev->hw_io + ENE_IO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) /* Set bits in hardware register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) static void ene_set_reg_mask(struct ene_device *dev, u16 reg, u8 mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) 	dbg_regs("reg %04x |= %02x", reg, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 	ene_set_reg_addr(dev, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 	outb(inb(dev->hw_io + ENE_IO) | mask, dev->hw_io + ENE_IO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) /* Clear bits in hardware register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) static void ene_clear_reg_mask(struct ene_device *dev, u16 reg, u8 mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 	dbg_regs("reg %04x &= ~%02x ", reg, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 	ene_set_reg_addr(dev, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 	outb(inb(dev->hw_io + ENE_IO) & ~mask, dev->hw_io + ENE_IO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) /* A helper to set/clear a bit in register according to boolean variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) static void ene_set_clear_reg_mask(struct ene_device *dev, u16 reg, u8 mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 								bool set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) 	if (set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) 		ene_set_reg_mask(dev, reg, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 		ene_clear_reg_mask(dev, reg, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 
/*
 * Detect chip model/revision and probe which optional features the
 * firmware exposes (learning/TX capability, GPIO choice, fan input,
 * extended sample buffer).
 * Returns 0 on success, -ENODEV when the hardware is unusable.
 */
static int ene_hw_detect(struct ene_device *dev)
{
	u8 chip_major, chip_minor;
	u8 hw_revision, old_ver;
	u8 fw_reg2, fw_reg1;

	/* Version registers are read with the reserved ECSTS bit cleared;
	   the bit is restored immediately afterwards. */
	ene_clear_reg_mask(dev, ENE_ECSTS, ENE_ECSTS_RSRVD);
	chip_major = ene_read_reg(dev, ENE_ECVER_MAJOR);
	chip_minor = ene_read_reg(dev, ENE_ECVER_MINOR);
	ene_set_reg_mask(dev, ENE_ECSTS, ENE_ECSTS_RSRVD);

	hw_revision = ene_read_reg(dev, ENE_ECHV);
	old_ver = ene_read_reg(dev, ENE_HW_VER_OLD);

	/* PLL frequency is split across two registers: high register
	   contributes bits 4+, low register's upper nibble the rest */
	dev->pll_freq = (ene_read_reg(dev, ENE_PLLFRH) << 4) +
		(ene_read_reg(dev, ENE_PLLFRL) >> 4);

	/* a non-default sample period needs an RX period correction,
	   whose magnitude depends on the PLL frequency */
	if (sample_period != ENE_DEFAULT_SAMPLE_PERIOD)
		dev->rx_period_adjust =
			dev->pll_freq == ENE_DEFAULT_PLL_FREQ ? 2 : 4;

	/* 0xFF readback suggests register reads don't reach the chip */
	if (hw_revision == 0xFF) {
		pr_warn("device seems to be disabled\n");
		pr_warn("send a mail to lirc-list@lists.sourceforge.net\n");
		pr_warn("please attach output of acpidump and dmidecode\n");
		return -ENODEV;
	}

	pr_notice("chip is 0x%02x%02x - kbver = 0x%02x, rev = 0x%02x\n",
		  chip_major, chip_minor, old_ver, hw_revision);

	pr_notice("PLL freq = %d\n", dev->pll_freq);

	if (chip_major == 0x33) {
		pr_warn("chips 0x33xx aren't supported\n");
		return -ENODEV;
	}

	/* classify the hardware revision (B, C, or D-and-newer) */
	if (chip_major == 0x39 && chip_minor == 0x26 && hw_revision == 0xC0) {
		dev->hw_revision = ENE_HW_C;
		pr_notice("KB3926C detected\n");
	} else if (old_ver == 0x24 && hw_revision == 0xC0) {
		dev->hw_revision = ENE_HW_B;
		pr_notice("KB3926B detected\n");
	} else {
		dev->hw_revision = ENE_HW_D;
		pr_notice("KB3926D or higher detected\n");
	}

	/* detect features hardware supports */
	if (dev->hw_revision < ENE_HW_C)
		return 0;

	/* feature bits are advertised through two firmware registers */
	fw_reg1 = ene_read_reg(dev, ENE_FW1);
	fw_reg2 = ene_read_reg(dev, ENE_FW2);

	pr_notice("Firmware regs: %02x %02x\n", fw_reg1, fw_reg2);

	dev->hw_use_gpio_0a = !!(fw_reg2 & ENE_FW2_GP0A);
	dev->hw_learning_and_tx_capable = !!(fw_reg2 & ENE_FW2_LEARNING);
	dev->hw_extra_buffer = !!(fw_reg1 & ENE_FW1_HAS_EXTRA_BUF);

	/* fan input is only meaningful on learning-capable hardware */
	if (dev->hw_learning_and_tx_capable)
		dev->hw_fan_input = !!(fw_reg2 & ENE_FW2_FAN_INPUT);

	pr_notice("Hardware features:\n");

	if (dev->hw_learning_and_tx_capable) {
		pr_notice("* Supports transmitting & learning mode\n");
		pr_notice("   This feature is rare and therefore,\n");
		pr_notice("   you are welcome to test it,\n");
		pr_notice("   and/or contact the author via:\n");
		pr_notice("   lirc-list@lists.sourceforge.net\n");
		pr_notice("   or maximlevitsky@gmail.com\n");

		/* raw (learning) input uses the GPIO opposite to the
		   normal demodulated input, hence the inverted strings */
		pr_notice("* Uses GPIO %s for IR raw input\n",
			  dev->hw_use_gpio_0a ? "40" : "0A");

		if (dev->hw_fan_input)
			pr_notice("* Uses unused fan feedback input as source of demodulated IR data\n");
	}

	if (!dev->hw_fan_input)
		pr_notice("* Uses GPIO %s for IR demodulated input\n",
			  dev->hw_use_gpio_0a ? "0A" : "40");

	if (dev->hw_extra_buffer)
		pr_notice("* Uses new style input buffer\n");
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 
/*
 * Read properties of hw sample buffer: sync the driver's read pointer to
 * the firmware write pointer and, on new-style firmware, read out and
 * sanity-check the two extended sample buffer descriptors.
 */
static void ene_rx_setup_hw_buffer(struct ene_device *dev)
{
	u16 tmp;

	ene_rx_read_hw_pointer(dev);
	/* start consuming from wherever the hardware is writing now */
	dev->r_pointer = dev->w_pointer;

	/* old-style firmware: just two fixed-size packets, no extras */
	if (!dev->hw_extra_buffer) {
		dev->buffer_len = ENE_FW_PACKET_SIZE * 2;
		return;
	}

	/* extra buffer 1 descriptor: address low byte, high byte, length */
	tmp = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER);
	tmp |= ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER+1) << 8;
	dev->extra_buf1_address = tmp;

	dev->extra_buf1_len = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 2);

	/* extra buffer 2 descriptor: same layout at the next offsets */
	tmp = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 3);
	tmp |= ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 4) << 8;
	dev->extra_buf2_address = tmp;

	dev->extra_buf2_len = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 5);

	/* total ring = 8 bytes of the fixed buffer plus both extras */
	dev->buffer_len = dev->extra_buf1_len + dev->extra_buf2_len + 8;

	pr_notice("Hardware uses 2 extended buffers:\n");
	pr_notice("  0x%04x - len : %d\n",
		  dev->extra_buf1_address, dev->extra_buf1_len);
	pr_notice("  0x%04x - len : %d\n",
		  dev->extra_buf2_address, dev->extra_buf2_len);

	pr_notice("Total buffer len = %d\n", dev->buffer_len);

	/* reject implausible values reported by firmware */
	if (dev->buffer_len > 64 || dev->buffer_len < 16)
		goto error;

	/* addresses must fall in the expected window (presumably the
	   EC's RAM region — confirm against ENE documentation) */
	if (dev->extra_buf1_address > 0xFBFC ||
					dev->extra_buf1_address < 0xEC00)
		goto error;

	if (dev->extra_buf2_address > 0xFBFC ||
					dev->extra_buf2_address < 0xEC00)
		goto error;

	if (dev->r_pointer > dev->buffer_len)
		goto error;

	/* tell firmware that the driver handles the extra buffers */
	ene_set_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND);
	return;
error:
	pr_warn("Error validating extra buffers, device probably won't work\n");
	/* fall back to the old-style fixed buffer */
	dev->hw_extra_buffer = false;
	ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) /* Restore the pointers to extra buffers - to make module reload work*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) static void ene_rx_restore_hw_buffer(struct ene_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 	if (!dev->hw_extra_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 	ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 				dev->extra_buf1_address & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 	ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 				dev->extra_buf1_address >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 	ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 2, dev->extra_buf1_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 	ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 				dev->extra_buf2_address & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 	ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 				dev->extra_buf2_address >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 	ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 				dev->extra_buf2_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 	ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) /* Read hardware write pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) static void ene_rx_read_hw_pointer(struct ene_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 	if (dev->hw_extra_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 		dev->w_pointer = ene_read_reg(dev, ENE_FW_RX_POINTER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 		dev->w_pointer = ene_read_reg(dev, ENE_FW2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 			& ENE_FW2_BUF_WPTR ? 0 : ENE_FW_PACKET_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 	dbg_verbose("RB: HW write pointer: %02x, driver read pointer: %02x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 		dev->w_pointer, dev->r_pointer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) /* Gets address of next sample from HW ring buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) static int ene_rx_get_sample_reg(struct ene_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 	int r_pointer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 	if (dev->r_pointer == dev->w_pointer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 		dbg_verbose("RB: hit end, try update w_pointer");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 		ene_rx_read_hw_pointer(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 	if (dev->r_pointer == dev->w_pointer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 		dbg_verbose("RB: end of data at %d", dev->r_pointer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 	dbg_verbose("RB: reading at offset %d", dev->r_pointer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 	r_pointer = dev->r_pointer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 	dev->r_pointer++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 	if (dev->r_pointer == dev->buffer_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 		dev->r_pointer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 	dbg_verbose("RB: next read will be from offset %d", dev->r_pointer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 	if (r_pointer < 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 		dbg_verbose("RB: read at main buffer at %d", r_pointer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 		return ENE_FW_SAMPLE_BUFFER + r_pointer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 	r_pointer -= 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 	if (r_pointer < dev->extra_buf1_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 		dbg_verbose("RB: read at 1st extra buffer at %d", r_pointer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 		return dev->extra_buf1_address + r_pointer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 	r_pointer -= dev->extra_buf1_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 	if (r_pointer < dev->extra_buf2_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) 		dbg_verbose("RB: read at 2nd extra buffer at %d", r_pointer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 		return dev->extra_buf2_address + r_pointer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) 	dbg("attempt to read beyond ring buffer end");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) /* Sense current received carrier */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) static void ene_rx_sense_carrier(struct ene_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 	int carrier, duty_cycle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 	int period = ene_read_reg(dev, ENE_CIRCAR_PRD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 	int hperiod = ene_read_reg(dev, ENE_CIRCAR_HPRD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) 	if (!(period & ENE_CIRCAR_PRD_VALID))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 	period &= ~ENE_CIRCAR_PRD_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 	if (!period)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 	dbg("RX: hardware carrier period = %02x", period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 	dbg("RX: hardware carrier pulse period = %02x", hperiod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 	carrier = 2000000 / period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 	duty_cycle = (hperiod * 100) / period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 	dbg("RX: sensed carrier = %d Hz, duty cycle %d%%",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 						carrier, duty_cycle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 	if (dev->carrier_detect_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 		struct ir_raw_event ev = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 			.carrier_report = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) 			.carrier = carrier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 			.duty_cycle = duty_cycle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) 		};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 		ir_raw_event_store(dev->rdev, &ev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) /* this enables/disables the CIR RX engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) static void ene_rx_enable_cir_engine(struct ene_device *dev, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 	ene_set_clear_reg_mask(dev, ENE_CIRCFG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 			ENE_CIRCFG_RX_EN | ENE_CIRCFG_RX_IRQ, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) /* this selects input for CIR engine. Ether GPIO 0A or GPIO40*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) static void ene_rx_select_input(struct ene_device *dev, bool gpio_0a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) 	ene_set_clear_reg_mask(dev, ENE_CIRCFG2, ENE_CIRCFG2_GPIO0A, gpio_0a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362)  * this enables alternative input via fan tachometer sensor and bypasses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363)  * the hw CIR engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) static void ene_rx_enable_fan_input(struct ene_device *dev, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 	if (!dev->hw_fan_input)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 	if (!enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 		ene_write_reg(dev, ENE_FAN_AS_IN1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 		ene_write_reg(dev, ENE_FAN_AS_IN1, ENE_FAN_AS_IN1_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 		ene_write_reg(dev, ENE_FAN_AS_IN2, ENE_FAN_AS_IN2_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) /* setup the receiver for RX*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) static void ene_rx_setup(struct ene_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 	bool learning_mode = dev->learning_mode_enabled ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 					dev->carrier_detect_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 	int sample_period_adjust = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 	dbg("RX: setup receiver, learning mode = %d", learning_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 	/* This selects RLC input and clears CFG2 settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 	ene_write_reg(dev, ENE_CIRCFG2, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 	/* set sample period*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 	if (sample_period == ENE_DEFAULT_SAMPLE_PERIOD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 		sample_period_adjust =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 			dev->pll_freq == ENE_DEFAULT_PLL_FREQ ? 1 : 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 	ene_write_reg(dev, ENE_CIRRLC_CFG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 			(sample_period + sample_period_adjust) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 						ENE_CIRRLC_CFG_OVERFLOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 	/* revB doesn't support inputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 	if (dev->hw_revision < ENE_HW_C)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 		goto select_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 	if (learning_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 		WARN_ON(!dev->hw_learning_and_tx_capable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 		/* Enable the opposite of the normal input
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 		That means that if GPIO40 is normally used, use GPIO0A
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 		and vice versa.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 		This input will carry non demodulated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 		signal, and we will tell the hw to demodulate it itself */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 		ene_rx_select_input(dev, !dev->hw_use_gpio_0a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 		dev->rx_fan_input_inuse = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 		/* Enable carrier demodulation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 		ene_set_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_CARR_DEMOD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 		/* Enable carrier detection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 		ene_write_reg(dev, ENE_CIRCAR_PULS, 0x63);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 		ene_set_clear_reg_mask(dev, ENE_CIRCFG2, ENE_CIRCFG2_CARR_DETECT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 			dev->carrier_detect_enabled || debug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 		if (dev->hw_fan_input)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 			dev->rx_fan_input_inuse = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 			ene_rx_select_input(dev, dev->hw_use_gpio_0a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 		/* Disable carrier detection & demodulation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 		ene_clear_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_CARR_DEMOD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 		ene_clear_reg_mask(dev, ENE_CIRCFG2, ENE_CIRCFG2_CARR_DETECT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) select_timeout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 	if (dev->rx_fan_input_inuse) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 		dev->rdev->rx_resolution = ENE_FW_SAMPLE_PERIOD_FAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 		/* Fan input doesn't support timeouts, it just ends the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 			input with a maximum sample */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 		dev->rdev->min_timeout = dev->rdev->max_timeout =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 			ENE_FW_SMPL_BUF_FAN_MSK *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 				ENE_FW_SAMPLE_PERIOD_FAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 		dev->rdev->rx_resolution = sample_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 		/* Theoreticly timeout is unlimited, but we cap it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 		 * because it was seen that on one device, it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 		 * would stop sending spaces after around 250 msec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 		 * Besides, this is close to 2^32 anyway and timeout is u32.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 		dev->rdev->min_timeout = 127 * sample_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 		dev->rdev->max_timeout = 200000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 	if (dev->hw_learning_and_tx_capable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 		dev->rdev->tx_resolution = sample_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 	if (dev->rdev->timeout > dev->rdev->max_timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 		dev->rdev->timeout = dev->rdev->max_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	if (dev->rdev->timeout < dev->rdev->min_timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 		dev->rdev->timeout = dev->rdev->min_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) /* Enable the device for receive */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) static void ene_rx_enable_hw(struct ene_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	u8 reg_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	/* Enable system interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	if (dev->hw_revision < ENE_HW_C) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 		ene_write_reg(dev, ENEB_IRQ, dev->irq << 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 		ene_write_reg(dev, ENEB_IRQ_UNK1, 0x01);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 		reg_value = ene_read_reg(dev, ENE_IRQ) & 0xF0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 		reg_value |= ENE_IRQ_UNK_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 		reg_value &= ~ENE_IRQ_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 		reg_value |= (dev->irq & ENE_IRQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 		ene_write_reg(dev, ENE_IRQ, reg_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	/* Enable inputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	ene_rx_enable_fan_input(dev, dev->rx_fan_input_inuse);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 	ene_rx_enable_cir_engine(dev, !dev->rx_fan_input_inuse);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	/* ack any pending irqs - just in case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	ene_irq_status(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	/* enable firmware bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	ene_set_reg_mask(dev, ENE_FW1, ENE_FW1_ENABLE | ENE_FW1_IRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	/* enter idle mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	ir_raw_event_set_idle(dev->rdev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) /* Enable the device for receive - wrapper to track the state*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) static void ene_rx_enable(struct ene_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 	ene_rx_enable_hw(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	dev->rx_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) /* Disable the device receiver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) static void ene_rx_disable_hw(struct ene_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	/* disable inputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	ene_rx_enable_cir_engine(dev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	ene_rx_enable_fan_input(dev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	/* disable hardware IRQ and firmware flag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_ENABLE | ENE_FW1_IRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	ir_raw_event_set_idle(dev->rdev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) /* Disable the device receiver - wrapper to track the state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) static void ene_rx_disable(struct ene_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	ene_rx_disable_hw(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 	dev->rx_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) /* This resets the receiver. Useful to stop stream of spaces at end of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521)  * transmission
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) static void ene_rx_reset(struct ene_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	ene_clear_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_RX_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	ene_set_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_RX_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) /* Set up the TX carrier frequency and duty cycle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) static void ene_tx_set_carrier(struct ene_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	u8 tx_puls_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	spin_lock_irqsave(&dev->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	ene_set_clear_reg_mask(dev, ENE_CIRCFG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 		ENE_CIRCFG_TX_CARR, dev->tx_period > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	if (!dev->tx_period)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 		goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 	BUG_ON(dev->tx_duty_cycle >= 100 || dev->tx_duty_cycle <= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 	tx_puls_width = dev->tx_period / (100 / dev->tx_duty_cycle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	if (!tx_puls_width)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 		tx_puls_width = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	dbg("TX: pulse distance = %d * 500 ns", dev->tx_period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	dbg("TX: pulse width = %d * 500 ns", tx_puls_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 	ene_write_reg(dev, ENE_CIRMOD_PRD, dev->tx_period | ENE_CIRMOD_PRD_POL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	ene_write_reg(dev, ENE_CIRMOD_HPRD, tx_puls_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	spin_unlock_irqrestore(&dev->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) /* Enable/disable transmitters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) static void ene_tx_set_transmitters(struct ene_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	spin_lock_irqsave(&dev->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	ene_set_clear_reg_mask(dev, ENE_GPIOFS8, ENE_GPIOFS8_GPIO41,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 					!!(dev->transmitter_mask & 0x01));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	ene_set_clear_reg_mask(dev, ENE_GPIOFS1, ENE_GPIOFS1_GPIO0D,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 					!!(dev->transmitter_mask & 0x02));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 	spin_unlock_irqrestore(&dev->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) /* prepare transmission */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) static void ene_tx_enable(struct ene_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	u8 conf1 = ene_read_reg(dev, ENE_CIRCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	u8 fwreg2 = ene_read_reg(dev, ENE_FW2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 	dev->saved_conf1 = conf1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	/* Show information about currently connected transmitter jacks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	if (fwreg2 & ENE_FW2_EMMITER1_CONN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 		dbg("TX: Transmitter #1 is connected");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	if (fwreg2 & ENE_FW2_EMMITER2_CONN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 		dbg("TX: Transmitter #2 is connected");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	if (!(fwreg2 & (ENE_FW2_EMMITER1_CONN | ENE_FW2_EMMITER2_CONN)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 		pr_warn("TX: transmitter cable isn't connected!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	/* disable receive on revc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	if (dev->hw_revision == ENE_HW_C)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 		conf1 &= ~ENE_CIRCFG_RX_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	/* Enable TX engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 	conf1 |= ENE_CIRCFG_TX_EN | ENE_CIRCFG_TX_IRQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	ene_write_reg(dev, ENE_CIRCFG, conf1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) /* end transmission */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) static void ene_tx_disable(struct ene_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	ene_write_reg(dev, ENE_CIRCFG, dev->saved_conf1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	dev->tx_buffer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) /* TX one sample - must be called with dev->hw_lock*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) static void ene_tx_sample(struct ene_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	u8 raw_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	u32 sample;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	bool pulse = dev->tx_sample_pulse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	if (!dev->tx_buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 		pr_warn("TX: BUG: attempt to transmit NULL buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	/* Grab next TX sample */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	if (!dev->tx_sample) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 		if (dev->tx_pos == dev->tx_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 			if (!dev->tx_done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 				dbg("TX: no more data to send");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 				dev->tx_done = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 				goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 				dbg("TX: last sample sent by hardware");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 				ene_tx_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 				complete(&dev->tx_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 		sample = dev->tx_buffer[dev->tx_pos++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 		dev->tx_sample_pulse = !dev->tx_sample_pulse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 		dev->tx_sample = DIV_ROUND_CLOSEST(sample, sample_period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 		if (!dev->tx_sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 			dev->tx_sample = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	raw_tx = min(dev->tx_sample , (unsigned int)ENE_CIRRLC_OUT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	dev->tx_sample -= raw_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	dbg("TX: sample %8d (%s)", raw_tx * sample_period,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 						pulse ? "pulse" : "space");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	if (pulse)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 		raw_tx |= ENE_CIRRLC_OUT_PULSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	ene_write_reg(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 		dev->tx_reg ? ENE_CIRRLC_OUT1 : ENE_CIRRLC_OUT0, raw_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	dev->tx_reg = !dev->tx_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 	/* simulate TX done interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	if (txsim)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 		mod_timer(&dev->tx_sim_timer, jiffies + HZ / 500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) /* timer to simulate tx done interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) static void ene_tx_irqsim(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	struct ene_device *dev = from_timer(dev, t, tx_sim_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	spin_lock_irqsave(&dev->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	ene_tx_sample(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	spin_unlock_irqrestore(&dev->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) /* read irq status and ack it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) static int ene_irq_status(struct ene_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	u8 irq_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	u8 fw_flags1, fw_flags2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	int retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 	fw_flags2 = ene_read_reg(dev, ENE_FW2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	if (dev->hw_revision < ENE_HW_C) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 		irq_status = ene_read_reg(dev, ENEB_IRQ_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 		if (!(irq_status & ENEB_IRQ_STATUS_IR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 		ene_clear_reg_mask(dev, ENEB_IRQ_STATUS, ENEB_IRQ_STATUS_IR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 		return ENE_IRQ_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	irq_status = ene_read_reg(dev, ENE_IRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	if (!(irq_status & ENE_IRQ_STATUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	/* original driver does that twice - a workaround ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	ene_write_reg(dev, ENE_IRQ, irq_status & ~ENE_IRQ_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	ene_write_reg(dev, ENE_IRQ, irq_status & ~ENE_IRQ_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	/* check RX interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	if (fw_flags2 & ENE_FW2_RXIRQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 		retval |= ENE_IRQ_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 		ene_write_reg(dev, ENE_FW2, fw_flags2 & ~ENE_FW2_RXIRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	/* check TX interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	fw_flags1 = ene_read_reg(dev, ENE_FW1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	if (fw_flags1 & ENE_FW1_TXIRQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 		ene_write_reg(dev, ENE_FW1, fw_flags1 & ~ENE_FW1_TXIRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 		retval |= ENE_IRQ_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) /* interrupt handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) static irqreturn_t ene_isr(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	u16 hw_value, reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	int hw_sample, irq_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	bool pulse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	irqreturn_t retval = IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	struct ene_device *dev = (struct ene_device *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	struct ir_raw_event ev = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	spin_lock_irqsave(&dev->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	dbg_verbose("ISR called");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	ene_rx_read_hw_pointer(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	irq_status = ene_irq_status(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	if (!irq_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 		goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	retval = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	if (irq_status & ENE_IRQ_TX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 		dbg_verbose("TX interrupt");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 		if (!dev->hw_learning_and_tx_capable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 			dbg("TX interrupt on unsupported device!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 			goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 		ene_tx_sample(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	if (!(irq_status & ENE_IRQ_RX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 		goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	dbg_verbose("RX interrupt");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	if (dev->hw_learning_and_tx_capable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 		ene_rx_sense_carrier(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	/* On hardware that don't support extra buffer we need to trust
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 		the interrupt and not track the read pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	if (!dev->hw_extra_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 		dev->r_pointer = dev->w_pointer == 0 ? ENE_FW_PACKET_SIZE : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 		reg = ene_rx_get_sample_reg(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 		dbg_verbose("next sample to read at: %04x", reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 		if (!reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		hw_value = ene_read_reg(dev, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 		if (dev->rx_fan_input_inuse) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 			int offset = ENE_FW_SMPL_BUF_FAN - ENE_FW_SAMPLE_BUFFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 			/* read high part of the sample */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 			hw_value |= ene_read_reg(dev, reg + offset) << 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 			pulse = hw_value & ENE_FW_SMPL_BUF_FAN_PLS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 			/* clear space bit, and other unused bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 			hw_value &= ENE_FW_SMPL_BUF_FAN_MSK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 			hw_sample = hw_value * ENE_FW_SAMPLE_PERIOD_FAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 			pulse = !(hw_value & ENE_FW_SAMPLE_SPACE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 			hw_value &= ~ENE_FW_SAMPLE_SPACE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 			hw_sample = hw_value * sample_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 			if (dev->rx_period_adjust) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 				hw_sample *= 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 				hw_sample /= (100 + dev->rx_period_adjust);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		if (!dev->hw_extra_buffer && !hw_sample) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 			dev->r_pointer = dev->w_pointer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		dbg("RX: %d (%s)", hw_sample, pulse ? "pulse" : "space");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		ev.duration = hw_sample;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		ev.pulse = pulse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		ir_raw_event_store_with_filter(dev->rdev, &ev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	ir_raw_event_handle(dev->rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	spin_unlock_irqrestore(&dev->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) /* Initialize default settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) static void ene_setup_default_settings(struct ene_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	dev->tx_period = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	dev->tx_duty_cycle = 50; /*%*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	dev->transmitter_mask = 0x03;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	dev->learning_mode_enabled = learning_mode_force;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	/* Set reasonable default timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	dev->rdev->timeout = MS_TO_US(150);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) /* Upload all hardware settings at once. Used at load and resume time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) static void ene_setup_hw_settings(struct ene_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	if (dev->hw_learning_and_tx_capable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		ene_tx_set_carrier(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		ene_tx_set_transmitters(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	ene_rx_setup(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) /* outside interface: called on first open*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) static int ene_open(struct rc_dev *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	struct ene_device *dev = rdev->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	spin_lock_irqsave(&dev->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	ene_rx_enable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	spin_unlock_irqrestore(&dev->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) /* outside interface: called on device close*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) static void ene_close(struct rc_dev *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	struct ene_device *dev = rdev->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	spin_lock_irqsave(&dev->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	ene_rx_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	spin_unlock_irqrestore(&dev->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) /* outside interface: set transmitter mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) static int ene_set_tx_mask(struct rc_dev *rdev, u32 tx_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	struct ene_device *dev = rdev->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	dbg("TX: attempt to set transmitter mask %02x", tx_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	/* invalid txmask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	if (!tx_mask || tx_mask & ~0x03) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		dbg("TX: invalid mask");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		/* return count of transmitters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		return 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	dev->transmitter_mask = tx_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	ene_tx_set_transmitters(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) /* outside interface : set tx carrier */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) static int ene_set_tx_carrier(struct rc_dev *rdev, u32 carrier)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	struct ene_device *dev = rdev->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	u32 period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	dbg("TX: attempt to set tx carrier to %d kHz", carrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	if (carrier == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	period = 2000000 / carrier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	if (period && (period > ENE_CIRMOD_PRD_MAX ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 			period < ENE_CIRMOD_PRD_MIN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		dbg("TX: out of range %d-%d kHz carrier",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 			2000 / ENE_CIRMOD_PRD_MIN, 2000 / ENE_CIRMOD_PRD_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	dev->tx_period = period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	ene_tx_set_carrier(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) /*outside interface : set tx duty cycle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) static int ene_set_tx_duty_cycle(struct rc_dev *rdev, u32 duty_cycle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	struct ene_device *dev = rdev->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	dbg("TX: setting duty cycle to %d%%", duty_cycle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	dev->tx_duty_cycle = duty_cycle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	ene_tx_set_carrier(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) /* outside interface: enable learning mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) static int ene_set_learning_mode(struct rc_dev *rdev, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	struct ene_device *dev = rdev->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	if (enable == dev->learning_mode_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	spin_lock_irqsave(&dev->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	dev->learning_mode_enabled = enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	ene_rx_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	ene_rx_setup(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	ene_rx_enable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	spin_unlock_irqrestore(&dev->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) static int ene_set_carrier_report(struct rc_dev *rdev, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	struct ene_device *dev = rdev->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	if (enable == dev->carrier_detect_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	spin_lock_irqsave(&dev->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	dev->carrier_detect_enabled = enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	ene_rx_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	ene_rx_setup(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	ene_rx_enable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	spin_unlock_irqrestore(&dev->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) /* outside interface: enable or disable idle mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) static void ene_set_idle(struct rc_dev *rdev, bool idle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	struct ene_device *dev = rdev->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	if (idle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		ene_rx_reset(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		dbg("RX: end of data");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) /* outside interface: transmit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) static int ene_transmit(struct rc_dev *rdev, unsigned *buf, unsigned n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	struct ene_device *dev = rdev->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	dev->tx_buffer = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	dev->tx_len = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	dev->tx_pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	dev->tx_reg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	dev->tx_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	dev->tx_sample = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	dev->tx_sample_pulse = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	dbg("TX: %d samples", dev->tx_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	spin_lock_irqsave(&dev->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	ene_tx_enable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	/* Transmit first two samples */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	ene_tx_sample(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	ene_tx_sample(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	spin_unlock_irqrestore(&dev->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	if (wait_for_completion_timeout(&dev->tx_complete, 2 * HZ) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		dbg("TX: timeout");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		spin_lock_irqsave(&dev->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		ene_tx_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		spin_unlock_irqrestore(&dev->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 		dbg("TX: done");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	return n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) /* probe entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	int error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	struct rc_dev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	struct ene_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	/* allocate memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	dev = kzalloc(sizeof(struct ene_device), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	rdev = rc_allocate_device(RC_DRIVER_IR_RAW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	if (!dev || !rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		goto exit_free_dev_rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	/* validate resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	error = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	/* init these to -1, as 0 is valid for both */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	dev->hw_io = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	dev->irq = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	if (!pnp_port_valid(pnp_dev, 0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	    pnp_port_len(pnp_dev, 0) < ENE_IO_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		goto exit_free_dev_rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	if (!pnp_irq_valid(pnp_dev, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		goto exit_free_dev_rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	spin_lock_init(&dev->hw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	dev->hw_io = pnp_port_start(pnp_dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	dev->irq = pnp_irq(pnp_dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	pnp_set_drvdata(pnp_dev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	dev->pnp_dev = pnp_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	/* don't allow too short/long sample periods */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	if (sample_period < 5 || sample_period > 0x7F)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		sample_period = ENE_DEFAULT_SAMPLE_PERIOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	/* detect hardware version and features */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	error = ene_hw_detect(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		goto exit_free_dev_rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	if (!dev->hw_learning_and_tx_capable && txsim) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		dev->hw_learning_and_tx_capable = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		timer_setup(&dev->tx_sim_timer, ene_tx_irqsim, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		pr_warn("Simulation of TX activated\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	if (!dev->hw_learning_and_tx_capable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		learning_mode_force = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	rdev->priv = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	rdev->open = ene_open;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	rdev->close = ene_close;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	rdev->s_idle = ene_set_idle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	rdev->driver_name = ENE_DRIVER_NAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	rdev->map_name = RC_MAP_RC6_MCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	rdev->device_name = "ENE eHome Infrared Remote Receiver";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	if (dev->hw_learning_and_tx_capable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		rdev->s_learning_mode = ene_set_learning_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 		init_completion(&dev->tx_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		rdev->tx_ir = ene_transmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		rdev->s_tx_mask = ene_set_tx_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		rdev->s_tx_carrier = ene_set_tx_carrier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		rdev->s_tx_duty_cycle = ene_set_tx_duty_cycle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		rdev->s_carrier_report = ene_set_carrier_report;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		rdev->device_name = "ENE eHome Infrared Remote Transceiver";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	dev->rdev = rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	ene_rx_setup_hw_buffer(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	ene_setup_default_settings(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	ene_setup_hw_settings(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	device_set_wakeup_capable(&pnp_dev->dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	device_set_wakeup_enable(&pnp_dev->dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	error = rc_register_device(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	if (error < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		goto exit_free_dev_rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	/* claim the resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	error = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		goto exit_unregister_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	if (request_irq(dev->irq, ene_isr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 			IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		goto exit_release_hw_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	pr_notice("driver has been successfully loaded\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) exit_release_hw_io:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	release_region(dev->hw_io, ENE_IO_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) exit_unregister_device:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	rc_unregister_device(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	rdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) exit_free_dev_rdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	rc_free_device(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	kfree(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) /* main unload function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) static void ene_remove(struct pnp_dev *pnp_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	struct ene_device *dev = pnp_get_drvdata(pnp_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	spin_lock_irqsave(&dev->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	ene_rx_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	ene_rx_restore_hw_buffer(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	spin_unlock_irqrestore(&dev->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	free_irq(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	release_region(dev->hw_io, ENE_IO_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	rc_unregister_device(dev->rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	kfree(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) /* enable wake on IR (wakes on specific button on original remote) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) static void ene_enable_wake(struct ene_device *dev, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	dbg("wake on IR %s", enable ? "enabled" : "disabled");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	ene_set_clear_reg_mask(dev, ENE_FW1, ENE_FW1_WAKE, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) static int ene_suspend(struct pnp_dev *pnp_dev, pm_message_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	struct ene_device *dev = pnp_get_drvdata(pnp_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	bool wake = device_may_wakeup(&dev->pnp_dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	if (!wake && dev->rx_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		ene_rx_disable_hw(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	ene_enable_wake(dev, wake);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) static int ene_resume(struct pnp_dev *pnp_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	struct ene_device *dev = pnp_get_drvdata(pnp_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	ene_setup_hw_settings(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	if (dev->rx_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		ene_rx_enable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	ene_enable_wake(dev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) static void ene_shutdown(struct pnp_dev *pnp_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	struct ene_device *dev = pnp_get_drvdata(pnp_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	ene_enable_wake(dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) static const struct pnp_device_id ene_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	{.id = "ENE0100",},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	{.id = "ENE0200",},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	{.id = "ENE0201",},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	{.id = "ENE0202",},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	{},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) static struct pnp_driver ene_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	.name = ENE_DRIVER_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	.id_table = ene_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	.flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	.probe = ene_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	.remove = ene_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	.suspend = ene_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	.resume = ene_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	.shutdown = ene_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) module_param(sample_period, int, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) MODULE_PARM_DESC(sample_period, "Hardware sample period (50 us default)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) module_param(learning_mode_force, bool, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) MODULE_PARM_DESC(learning_mode_force, "Enable learning mode by default");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) module_param(debug, int, S_IRUGO | S_IWUSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) MODULE_PARM_DESC(debug, "Debug level");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) module_param(txsim, bool, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) MODULE_PARM_DESC(txsim,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	"Simulate TX features on unsupported hardware (dangerous)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) MODULE_DEVICE_TABLE(pnp, ene_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) MODULE_DESCRIPTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	("Infrared input driver for KB3926B/C/D/E/F (aka ENE0100/ENE0200/ENE0201/ENE0202) CIR port");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) MODULE_AUTHOR("Maxim Levitsky");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) module_pnp_driver(ene_driver);