// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011-2016 Synaptics Incorporated
 * Copyright (c) 2011 Unixphere
 *
 * This driver provides the core support for a single RMI4-based device.
 *
 * The RMI4 specification can be found here (URL split for line length):
 *
 * http://www.synaptics.com/sites/default/files/
 *      511-000136-01-Rev-E-RMI4-Interfacing-Guide.pdf
 */

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/irq.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/irqdomain.h>
#include <uapi/linux/input.h>
#include <linux/rmi.h>
#include "rmi_bus.h"
#include "rmi_driver.h"

#define HAS_NONSTANDARD_PDT_MASK 0x40
#define RMI4_MAX_PAGE 0xff
#define RMI4_PAGE_SIZE 0x100
#define RMI4_PAGE_MASK 0xFF00

#define RMI_DEVICE_RESET_CMD 0x01
#define DEFAULT_RESET_DELAY_MS 100

void rmi_free_function_list(struct rmi_device *rmi_dev)
{
        struct rmi_function *fn, *tmp;
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);

        rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Freeing function list\n");

        /* Doing it in the reverse order so F01 will be removed last */
        list_for_each_entry_safe_reverse(fn, tmp,
                                         &data->function_list, node) {
                list_del(&fn->node);
                rmi_unregister_function(fn);
        }

        devm_kfree(&rmi_dev->dev, data->irq_memory);
        data->irq_memory = NULL;
        data->irq_status = NULL;
        data->fn_irq_bits = NULL;
        data->current_irq_mask = NULL;
        data->new_irq_mask = NULL;

        data->f01_container = NULL;
        data->f34_container = NULL;
}

static int reset_one_function(struct rmi_function *fn)
{
        struct rmi_function_handler *fh;
        int retval = 0;

        if (!fn || !fn->dev.driver)
                return 0;

        fh = to_rmi_function_handler(fn->dev.driver);
        if (fh->reset) {
                retval = fh->reset(fn);
                if (retval < 0)
                        dev_err(&fn->dev, "Reset failed with code %d.\n",
                                retval);
        }

        return retval;
}

static int configure_one_function(struct rmi_function *fn)
{
        struct rmi_function_handler *fh;
        int retval = 0;

        if (!fn || !fn->dev.driver)
                return 0;

        fh = to_rmi_function_handler(fn->dev.driver);
        if (fh->config) {
                retval = fh->config(fn);
                if (retval < 0)
                        dev_err(&fn->dev, "Config failed with code %d.\n",
                                retval);
        }

        return retval;
}

static int rmi_driver_process_reset_requests(struct rmi_device *rmi_dev)
{
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        struct rmi_function *entry;
        int retval;

        list_for_each_entry(entry, &data->function_list, node) {
                retval = reset_one_function(entry);
                if (retval < 0)
                        return retval;
        }

        return 0;
}

static int rmi_driver_process_config_requests(struct rmi_device *rmi_dev)
{
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        struct rmi_function *entry;
        int retval;

        list_for_each_entry(entry, &data->function_list, node) {
                retval = configure_one_function(entry);
                if (retval < 0)
                        return retval;
        }

        return 0;
}

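/*
 * Read the interrupt status from F01 (unless the transport has already
 * supplied attention data), mask it against the interrupt sources owned by
 * registered functions, and dispatch each pending source as a nested IRQ
 * through the driver's IRQ domain.
 */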
static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
{
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        struct device *dev = &rmi_dev->dev;
        int i;
        int error;

        if (!data)
                return 0;

        if (!data->attn_data.data) {
                error = rmi_read_block(rmi_dev,
                                data->f01_container->fd.data_base_addr + 1,
                                data->irq_status, data->num_of_irq_regs);
                if (error < 0) {
                        dev_err(dev, "Failed to read irqs, code=%d\n", error);
                        return error;
                }
        }

        mutex_lock(&data->irq_mutex);
        bitmap_and(data->irq_status, data->irq_status, data->fn_irq_bits,
                   data->irq_count);
        /*
         * At this point, irq_status has all bits that are set in the
         * interrupt status register and are enabled.
         */
        mutex_unlock(&data->irq_mutex);

        for_each_set_bit(i, data->irq_status, data->irq_count)
                handle_nested_irq(irq_find_mapping(data->irqdomain, i));

        if (data->input)
                input_sync(data->input);

        return 0;
}

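/**
 * rmi_set_attn_data - queue attention data reported by the transport
 * @rmi_dev: Pointer to an RMI device
 * @irq_status: Interrupt status that accompanied the data
 * @data: Attention data payload; it is copied, the caller keeps ownership
 * @size: Size of @data in bytes
 *
 * Transports that receive the interrupt status and sensor data out of band
 * push it here; rmi_irq_fn() then consumes it from the IRQ thread.
 */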
void rmi_set_attn_data(struct rmi_device *rmi_dev, unsigned long irq_status,
                       void *data, size_t size)
{
        struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
        struct rmi4_attn_data attn_data;
        void *fifo_data;

        if (!drvdata->enabled)
                return;

        fifo_data = kmemdup(data, size, GFP_ATOMIC);
        if (!fifo_data)
                return;

        attn_data.irq_status = irq_status;
        attn_data.size = size;
        attn_data.data = fifo_data;

        kfifo_put(&drvdata->attn_fifo, attn_data);
}
EXPORT_SYMBOL_GPL(rmi_set_attn_data);

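/*
 * Threaded interrupt handler. If the transport queued attention data, use
 * the interrupt status that came with it; otherwise the status is read from
 * the device in rmi_process_interrupt_requests(). The handler re-runs itself
 * until the attention FIFO is drained.
 */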
static irqreturn_t rmi_irq_fn(int irq, void *dev_id)
{
        struct rmi_device *rmi_dev = dev_id;
        struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
        struct rmi4_attn_data attn_data = {0};
        int ret, count;

        count = kfifo_get(&drvdata->attn_fifo, &attn_data);
        if (count) {
                *(drvdata->irq_status) = attn_data.irq_status;
                drvdata->attn_data = attn_data;
        }

        ret = rmi_process_interrupt_requests(rmi_dev);
        if (ret)
                rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev,
                        "Failed to process interrupt request: %d\n", ret);

        if (count) {
                kfree(attn_data.data);
                drvdata->attn_data.data = NULL;
        }

        if (!kfifo_is_empty(&drvdata->attn_fifo))
                return rmi_irq_fn(irq, dev_id);

        return IRQ_HANDLED;
}

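/*
 * Request the transport interrupt as a threaded, oneshot IRQ. If the
 * firmware/device tree does not specify a trigger type, fall back to
 * level-low, the typical polarity of the RMI4 ATTN line.
 */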
static int rmi_irq_init(struct rmi_device *rmi_dev)
{
        struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        int irq_flags = irq_get_trigger_type(pdata->irq);
        int ret;

        if (!irq_flags)
                irq_flags = IRQF_TRIGGER_LOW;

        ret = devm_request_threaded_irq(&rmi_dev->dev, pdata->irq, NULL,
                                        rmi_irq_fn, irq_flags | IRQF_ONESHOT,
                                        dev_driver_string(rmi_dev->xport->dev),
                                        rmi_dev);
        if (ret < 0) {
                dev_err(&rmi_dev->dev, "Failed to register interrupt %d\n",
                        pdata->irq);

                return ret;
        }

        data->enabled = true;

        return 0;
}

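/* Look up a registered function (e.g. 0x01, 0x34) by its function number. */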
struct rmi_function *rmi_find_function(struct rmi_device *rmi_dev, u8 number)
{
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        struct rmi_function *entry;

        list_for_each_entry(entry, &data->function_list, node) {
                if (entry->fd.function_number == number)
                        return entry;
        }

        return NULL;
}

static int suspend_one_function(struct rmi_function *fn)
{
        struct rmi_function_handler *fh;
        int retval = 0;

        if (!fn || !fn->dev.driver)
                return 0;

        fh = to_rmi_function_handler(fn->dev.driver);
        if (fh->suspend) {
                retval = fh->suspend(fn);
                if (retval < 0)
                        dev_err(&fn->dev, "Suspend failed with code %d.\n",
                                retval);
        }

        return retval;
}

static int rmi_suspend_functions(struct rmi_device *rmi_dev)
{
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        struct rmi_function *entry;
        int retval;

        list_for_each_entry(entry, &data->function_list, node) {
                retval = suspend_one_function(entry);
                if (retval < 0)
                        return retval;
        }

        return 0;
}

static int resume_one_function(struct rmi_function *fn)
{
        struct rmi_function_handler *fh;
        int retval = 0;

        if (!fn || !fn->dev.driver)
                return 0;

        fh = to_rmi_function_handler(fn->dev.driver);
        if (fh->resume) {
                retval = fh->resume(fn);
                if (retval < 0)
                        dev_err(&fn->dev, "Resume failed with code %d.\n",
                                retval);
        }

        return retval;
}

static int rmi_resume_functions(struct rmi_device *rmi_dev)
{
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        struct rmi_function *entry;
        int retval;

        list_for_each_entry(entry, &data->function_list, node) {
                retval = resume_one_function(entry);
                if (retval < 0)
                        return retval;
        }

        return 0;
}

int rmi_enable_sensor(struct rmi_device *rmi_dev)
{
        int retval = 0;

        retval = rmi_driver_process_config_requests(rmi_dev);
        if (retval < 0)
                return retval;

        return rmi_process_interrupt_requests(rmi_dev);
}

/**
 * rmi_driver_set_input_params - set input device id and other data.
 *
 * @rmi_dev: Pointer to an RMI device
 * @input: Pointer to input device
 */
static int rmi_driver_set_input_params(struct rmi_device *rmi_dev,
                                       struct input_dev *input)
{
        input->name = SYNAPTICS_INPUT_DEVICE_NAME;
        input->id.vendor = SYNAPTICS_VENDOR_ID;
        input->id.bustype = BUS_RMI;
        return 0;
}

static void rmi_driver_set_input_name(struct rmi_device *rmi_dev,
                                      struct input_dev *input)
{
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        const char *device_name = rmi_f01_get_product_ID(data->f01_container);
        char *name;

        name = devm_kasprintf(&rmi_dev->dev, GFP_KERNEL,
                              "Synaptics %s", device_name);
        if (!name)
                return;

        input->name = name;
}

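/*
 * Enable the interrupt sources in @mask: OR them into the F01 interrupt
 * enable register (control base + 1) and record them in fn_irq_bits so
 * rmi_process_interrupt_requests() will dispatch them.
 */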
static int rmi_driver_set_irq_bits(struct rmi_device *rmi_dev,
                                   unsigned long *mask)
{
        int error = 0;
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        struct device *dev = &rmi_dev->dev;

        mutex_lock(&data->irq_mutex);
        bitmap_or(data->new_irq_mask,
                  data->current_irq_mask, mask, data->irq_count);

        error = rmi_write_block(rmi_dev,
                        data->f01_container->fd.control_base_addr + 1,
                        data->new_irq_mask, data->num_of_irq_regs);
        if (error < 0) {
                dev_err(dev, "%s: Failed to change enabled interrupts!",
                        __func__);
                goto error_unlock;
        }
        bitmap_copy(data->current_irq_mask, data->new_irq_mask,
                    data->num_of_irq_regs);

        bitmap_or(data->fn_irq_bits, data->fn_irq_bits, mask, data->irq_count);

error_unlock:
        mutex_unlock(&data->irq_mutex);
        return error;
}

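/*
 * Disable the interrupt sources in @mask: drop them from fn_irq_bits and
 * clear them in the F01 interrupt enable register.
 */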
static int rmi_driver_clear_irq_bits(struct rmi_device *rmi_dev,
                                     unsigned long *mask)
{
        int error = 0;
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        struct device *dev = &rmi_dev->dev;

        mutex_lock(&data->irq_mutex);
        bitmap_andnot(data->fn_irq_bits,
                      data->fn_irq_bits, mask, data->irq_count);
        bitmap_andnot(data->new_irq_mask,
                      data->current_irq_mask, mask, data->irq_count);

        error = rmi_write_block(rmi_dev,
                        data->f01_container->fd.control_base_addr + 1,
                        data->new_irq_mask, data->num_of_irq_regs);
        if (error < 0) {
                dev_err(dev, "%s: Failed to change enabled interrupts!",
                        __func__);
                goto error_unlock;
        }
        bitmap_copy(data->current_irq_mask, data->new_irq_mask,
                    data->num_of_irq_regs);

error_unlock:
        mutex_unlock(&data->irq_mutex);
        return error;
}

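/*
 * Called after the device has been reset (by the driver or by the firmware
 * itself): re-read the current interrupt mask from F01 and give every
 * function handler a chance to run its reset and config callbacks.
 */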
static int rmi_driver_reset_handler(struct rmi_device *rmi_dev)
{
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        int error;

        /*
         * Can get called before the driver is fully ready to deal with
         * this situation.
         */
        if (!data || !data->f01_container) {
                dev_warn(&rmi_dev->dev,
                         "Not ready to handle reset yet!\n");
                return 0;
        }

        error = rmi_read_block(rmi_dev,
                        data->f01_container->fd.control_base_addr + 1,
                        data->current_irq_mask, data->num_of_irq_regs);
        if (error < 0) {
                dev_err(&rmi_dev->dev, "%s: Failed to read current IRQ mask.\n",
                        __func__);
                return error;
        }

        error = rmi_driver_process_reset_requests(rmi_dev);
        if (error < 0)
                return error;

        error = rmi_driver_process_config_requests(rmi_dev);
        if (error < 0)
                return error;

        return 0;
}

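/*
 * Read one Page Description Table entry and unpack it. The register base
 * addresses in the entry are page-relative; page_start records which page
 * the entry was read from so they can be turned into absolute addresses.
 */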
static int rmi_read_pdt_entry(struct rmi_device *rmi_dev,
                              struct pdt_entry *entry, u16 pdt_address)
{
        u8 buf[RMI_PDT_ENTRY_SIZE];
        int error;

        error = rmi_read_block(rmi_dev, pdt_address, buf, RMI_PDT_ENTRY_SIZE);
        if (error) {
                dev_err(&rmi_dev->dev, "Read PDT entry at %#06x failed, code: %d.\n",
                        pdt_address, error);
                return error;
        }

        entry->page_start = pdt_address & RMI4_PAGE_MASK;
        entry->query_base_addr = buf[0];
        entry->command_base_addr = buf[1];
        entry->control_base_addr = buf[2];
        entry->data_base_addr = buf[3];
        entry->interrupt_source_count = buf[4] & RMI_PDT_INT_SOURCE_COUNT_MASK;
        entry->function_version = (buf[4] & RMI_PDT_FUNCTION_VERSION_MASK) >> 5;
        entry->function_number = buf[5];

        return 0;
}

static void rmi_driver_copy_pdt_to_fd(const struct pdt_entry *pdt,
                                      struct rmi_function_descriptor *fd)
{
        fd->query_base_addr = pdt->query_base_addr + pdt->page_start;
        fd->command_base_addr = pdt->command_base_addr + pdt->page_start;
        fd->control_base_addr = pdt->control_base_addr + pdt->page_start;
        fd->data_base_addr = pdt->data_base_addr + pdt->page_start;
        fd->function_number = pdt->function_number;
        fd->interrupt_source_count = pdt->interrupt_source_count;
        fd->function_version = pdt->function_version;
}

#define RMI_SCAN_CONTINUE 0
#define RMI_SCAN_DONE 1

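/*
 * Scan the PDT entries of one page, from the top of the page downwards,
 * invoking @callback for every valid entry. Returns a negative error, or
 * RMI_SCAN_CONTINUE/RMI_SCAN_DONE to tell the caller whether to keep going.
 */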
static int rmi_scan_pdt_page(struct rmi_device *rmi_dev,
                             int page,
                             int *empty_pages,
                             void *ctx,
                             int (*callback)(struct rmi_device *rmi_dev,
                                             void *ctx,
                                             const struct pdt_entry *entry))
{
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        struct pdt_entry pdt_entry;
        u16 page_start = RMI4_PAGE_SIZE * page;
        u16 pdt_start = page_start + PDT_START_SCAN_LOCATION;
        u16 pdt_end = page_start + PDT_END_SCAN_LOCATION;
        u16 addr;
        int error;
        int retval;

        for (addr = pdt_start; addr >= pdt_end; addr -= RMI_PDT_ENTRY_SIZE) {
                error = rmi_read_pdt_entry(rmi_dev, &pdt_entry, addr);
                if (error)
                        return error;

                if (RMI4_END_OF_PDT(pdt_entry.function_number))
                        break;

                retval = callback(rmi_dev, ctx, &pdt_entry);
                if (retval != RMI_SCAN_CONTINUE)
                        return retval;
        }

        /*
         * Count number of empty PDT pages. If a gap of two pages
         * or more is found, stop scanning.
         */
        if (addr == pdt_start)
                ++*empty_pages;
        else
                *empty_pages = 0;

        return (data->bootloader_mode || *empty_pages >= 2) ?
                        RMI_SCAN_DONE : RMI_SCAN_CONTINUE;
}

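/*
 * Walk the Page Description Table across all pages, calling @callback for
 * each function entry found; @ctx is passed through to the callback. A
 * typical use with one of the callbacks below looks like:
 *
 *      int irq_count = 0;
 *
 *      error = rmi_scan_pdt(rmi_dev, &irq_count, rmi_count_irqs);
 */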
int rmi_scan_pdt(struct rmi_device *rmi_dev, void *ctx,
                 int (*callback)(struct rmi_device *rmi_dev,
                                 void *ctx, const struct pdt_entry *entry))
{
        int page;
        int empty_pages = 0;
        int retval = RMI_SCAN_DONE;

        for (page = 0; page <= RMI4_MAX_PAGE; page++) {
                retval = rmi_scan_pdt_page(rmi_dev, page, &empty_pages,
                                           ctx, callback);
                if (retval != RMI_SCAN_CONTINUE)
                        break;
        }

        return retval < 0 ? retval : 0;
}

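/*
 * Parse a register descriptor. On the wire this is: one byte giving the
 * size of the presence register, the presence register itself (the overall
 * structure size plus a bitmap of which packet registers exist), and then
 * the register structure, which holds the size and subpacket bitmap of
 * every packet register that is present.
 */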
int rmi_read_register_desc(struct rmi_device *d, u16 addr,
                           struct rmi_register_descriptor *rdesc)
{
        int ret;
        u8 size_presence_reg;
        u8 buf[35];
        int presense_offset = 1;
        u8 *struct_buf;
        int reg;
        int offset = 0;
        int map_offset = 0;
        int i;
        int b;

        /*
         * The first register of the register descriptor is the size of
         * the register descriptor's presence register.
         */
        ret = rmi_read(d, addr, &size_presence_reg);
        if (ret)
                return ret;
        ++addr;

        if (size_presence_reg < 0 || size_presence_reg > 35)
                return -EIO;

        memset(buf, 0, sizeof(buf));

        /*
         * The presence register contains the size of the register structure
         * and a bitmap which identifies which packet registers are present
         * for this particular register type (i.e. query, control, or data).
         */
        ret = rmi_read_block(d, addr, buf, size_presence_reg);
        if (ret)
                return ret;
        ++addr;

        if (buf[0] == 0) {
                presense_offset = 3;
                rdesc->struct_size = buf[1] | (buf[2] << 8);
        } else {
                rdesc->struct_size = buf[0];
        }

        for (i = presense_offset; i < size_presence_reg; i++) {
                for (b = 0; b < 8; b++) {
                        if (buf[i] & (0x1 << b))
                                bitmap_set(rdesc->presense_map, map_offset, 1);
                        ++map_offset;
                }
        }

        rdesc->num_registers = bitmap_weight(rdesc->presense_map,
                                             RMI_REG_DESC_PRESENSE_BITS);

        rdesc->registers = devm_kcalloc(&d->dev,
                                        rdesc->num_registers,
                                        sizeof(struct rmi_register_desc_item),
                                        GFP_KERNEL);
        if (!rdesc->registers)
                return -ENOMEM;

        /*
         * Allocate a temporary buffer to hold the register structure.
         * devm_kzalloc is not used here because the buffer only needs to
         * live until the end of this function.
         */
        struct_buf = kzalloc(rdesc->struct_size, GFP_KERNEL);
        if (!struct_buf)
                return -ENOMEM;

        /*
         * The register structure contains information about every packet
         * register of this type. This includes the size of the packet
         * register and a bitmap of all subpackets contained in the packet
         * register.
         */
        ret = rmi_read_block(d, addr, struct_buf, rdesc->struct_size);
        if (ret)
                goto free_struct_buff;

        reg = find_first_bit(rdesc->presense_map, RMI_REG_DESC_PRESENSE_BITS);
        for (i = 0; i < rdesc->num_registers; i++) {
                struct rmi_register_desc_item *item = &rdesc->registers[i];
                int reg_size = struct_buf[offset];

                ++offset;
                if (reg_size == 0) {
                        reg_size = struct_buf[offset] |
                                        (struct_buf[offset + 1] << 8);
                        offset += 2;
                }

                if (reg_size == 0) {
                        reg_size = struct_buf[offset] |
                                        (struct_buf[offset + 1] << 8) |
                                        (struct_buf[offset + 2] << 16) |
                                        (struct_buf[offset + 3] << 24);
                        offset += 4;
                }

                item->reg = reg;
                item->reg_size = reg_size;

                map_offset = 0;

                do {
                        for (b = 0; b < 7; b++) {
                                if (struct_buf[offset] & (0x1 << b))
                                        bitmap_set(item->subpacket_map,
                                                   map_offset, 1);
                                ++map_offset;
                        }
                } while (struct_buf[offset++] & 0x80);

                item->num_subpackets = bitmap_weight(item->subpacket_map,
                                                     RMI_REG_DESC_SUBPACKET_BITS);

                rmi_dbg(RMI_DEBUG_CORE, &d->dev,
                        "%s: reg: %d reg size: %ld subpackets: %d\n", __func__,
                        item->reg, item->reg_size, item->num_subpackets);

                reg = find_next_bit(rdesc->presense_map,
                                    RMI_REG_DESC_PRESENSE_BITS, reg + 1);
        }

free_struct_buff:
        kfree(struct_buf);
        return ret;
}

const struct rmi_register_desc_item *rmi_get_register_desc_item(
                                struct rmi_register_descriptor *rdesc, u16 reg)
{
        const struct rmi_register_desc_item *item;
        int i;

        for (i = 0; i < rdesc->num_registers; i++) {
                item = &rdesc->registers[i];
                if (item->reg == reg)
                        return item;
        }

        return NULL;
}

size_t rmi_register_desc_calc_size(struct rmi_register_descriptor *rdesc)
{
        const struct rmi_register_desc_item *item;
        int i;
        size_t size = 0;

        for (i = 0; i < rdesc->num_registers; i++) {
                item = &rdesc->registers[i];
                size += item->reg_size;
        }
        return size;
}

/* Compute the byte offset of register @reg relative to the base address */
int rmi_register_desc_calc_reg_offset(
                struct rmi_register_descriptor *rdesc, u16 reg)
{
        const struct rmi_register_desc_item *item;
        int offset = 0;
        int i;

        for (i = 0; i < rdesc->num_registers; i++) {
                item = &rdesc->registers[i];
                if (item->reg == reg)
                        return offset;
                offset += item->reg_size;
        }
        return -1;
}

bool rmi_register_desc_has_subpacket(const struct rmi_register_desc_item *item,
                                     u8 subpacket)
{
        return find_next_bit(item->subpacket_map, RMI_REG_DESC_PRESENSE_BITS,
                             subpacket) == subpacket;
}

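/*
 * Note whether the device reports that it is stuck in its bootloader. For
 * F34 v2 and later the flag lives in bit 7 of the F34 data register; older
 * devices report it in bit 6 of the F01 device status register.
 */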
static int rmi_check_bootloader_mode(struct rmi_device *rmi_dev,
                                     const struct pdt_entry *pdt)
{
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        int ret;
        u8 status;

        if (pdt->function_number == 0x34 && pdt->function_version > 1) {
                ret = rmi_read(rmi_dev, pdt->data_base_addr, &status);
                if (ret) {
                        dev_err(&rmi_dev->dev,
                                "Failed to read F34 status: %d.\n", ret);
                        return ret;
                }

                if (status & BIT(7))
                        data->bootloader_mode = true;
        } else if (pdt->function_number == 0x01) {
                ret = rmi_read(rmi_dev, pdt->data_base_addr, &status);
                if (ret) {
                        dev_err(&rmi_dev->dev,
                                "Failed to read F01 status: %d.\n", ret);
                        return ret;
                }

                if (status & BIT(6))
                        data->bootloader_mode = true;
        }

        return 0;
}

static int rmi_count_irqs(struct rmi_device *rmi_dev,
                          void *ctx, const struct pdt_entry *pdt)
{
        int *irq_count = ctx;
        int ret;

        *irq_count += pdt->interrupt_source_count;

        ret = rmi_check_bootloader_mode(rmi_dev, pdt);
        if (ret < 0)
                return ret;

        return RMI_SCAN_CONTINUE;
}

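/*
 * PDT scan callback used at probe time: when F01 is found, reset the device
 * (either through the transport's reset hook or by writing the reset command
 * to the F01 command register) and wait for the firmware to come back up.
 */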
int rmi_initial_reset(struct rmi_device *rmi_dev, void *ctx,
                      const struct pdt_entry *pdt)
{
        int error;

        if (pdt->function_number == 0x01) {
                u16 cmd_addr = pdt->page_start + pdt->command_base_addr;
                u8 cmd_buf = RMI_DEVICE_RESET_CMD;
                const struct rmi_device_platform_data *pdata =
                                rmi_get_platform_data(rmi_dev);

                if (rmi_dev->xport->ops->reset) {
                        error = rmi_dev->xport->ops->reset(rmi_dev->xport,
                                                           cmd_addr);
                        if (error)
                                return error;

                        return RMI_SCAN_DONE;
                }

                rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Sending reset\n");
                error = rmi_write_block(rmi_dev, cmd_addr, &cmd_buf, 1);
                if (error) {
                        dev_err(&rmi_dev->dev,
                                "Initial reset failed. Code = %d.\n", error);
                        return error;
                }

                mdelay(pdata->reset_delay_ms ?: DEFAULT_RESET_DELAY_MS);

                return RMI_SCAN_DONE;
        }

        /* F01 should always be on page 0. If we don't find it there, fail. */
        return pdt->page_start == 0 ? RMI_SCAN_CONTINUE : -ENODEV;
}

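/*
 * PDT scan callback that creates a struct rmi_function for each entry,
 * assigns it a slice of the interrupt bit space starting at the running
 * count held in @ctx, and registers it on the RMI bus.
 */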
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) static int rmi_create_function(struct rmi_device *rmi_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) void *ctx, const struct pdt_entry *pdt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) struct device *dev = &rmi_dev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) struct rmi_driver_data *data = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) int *current_irq_count = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) struct rmi_function *fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) rmi_dbg(RMI_DEBUG_CORE, dev, "Initializing F%02X.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) pdt->function_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) fn = kzalloc(sizeof(struct rmi_function) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) if (!fn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) dev_err(dev, "Failed to allocate memory for F%02X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) pdt->function_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) INIT_LIST_HEAD(&fn->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) rmi_driver_copy_pdt_to_fd(pdt, &fn->fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) fn->rmi_dev = rmi_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
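/*
 * Claim this function's interrupt bits: they occupy a contiguous
 * range starting at the current running IRQ count, recorded in the
 * function's irq_mask for later interrupt dispatch.
 */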
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) fn->num_of_irqs = pdt->interrupt_source_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) fn->irq_pos = *current_irq_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) *current_irq_count += fn->num_of_irqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) for (i = 0; i < fn->num_of_irqs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) set_bit(fn->irq_pos + i, fn->irq_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) error = rmi_register_function(fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) if (pdt->function_number == 0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) data->f01_container = fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) else if (pdt->function_number == 0x34)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) data->f34_container = fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) list_add_tail(&fn->node, &data->function_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) return RMI_SCAN_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) int irq = pdata->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) int irq_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) mutex_lock(&data->enabled_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (data->enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) enable_irq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) data->enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) retval = disable_irq_wake(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) dev_warn(&rmi_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) "Failed to disable irq for wake: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) * Call rmi_process_interrupt_requests() after enabling irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) * otherwise we may lose interrupt on edge-triggered systems.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) irq_flags = irq_get_trigger_type(pdata->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) if (irq_flags & IRQ_TYPE_EDGE_BOTH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) rmi_process_interrupt_requests(rmi_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) mutex_unlock(&data->enabled_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) struct rmi4_attn_data attn_data = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) int irq = pdata->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) int retval, count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) mutex_lock(&data->enabled_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) if (!data->enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) data->enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) disable_irq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) retval = enable_irq_wake(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) dev_warn(&rmi_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) "Failed to enable irq for wake: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) /* Make sure the attention FIFO is empty: free any queued attention data. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) while (!kfifo_is_empty(&data->attn_fifo)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) count = kfifo_get(&data->attn_fifo, &attn_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) if (count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) kfree(attn_data.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) mutex_unlock(&data->enabled_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) int rmi_driver_suspend(struct rmi_device *rmi_dev, bool enable_wake)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) retval = rmi_suspend_functions(rmi_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) dev_warn(&rmi_dev->dev, "Failed to suspend functions: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) rmi_disable_irq(rmi_dev, enable_wake);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) EXPORT_SYMBOL_GPL(rmi_driver_suspend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) int rmi_driver_resume(struct rmi_device *rmi_dev, bool clear_wake)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) rmi_enable_irq(rmi_dev, clear_wake);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) retval = rmi_resume_functions(rmi_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) dev_warn(&rmi_dev->dev, "Failed to resume functions: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) EXPORT_SYMBOL_GPL(rmi_driver_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) static int rmi_driver_remove(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) struct rmi_device *rmi_dev = to_rmi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) rmi_disable_irq(rmi_dev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) irq_domain_remove(data->irqdomain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) data->irqdomain = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) rmi_f34_remove_sysfs(rmi_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) rmi_free_function_list(rmi_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) #ifdef CONFIG_OF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) static int rmi_driver_of_probe(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) struct rmi_device_platform_data *pdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) retval = rmi_of_property_read_u32(dev, &pdata->reset_delay_ms,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) "syna,reset-delay-ms", 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) static inline int rmi_driver_of_probe(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) struct rmi_device_platform_data *pdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) int rmi_probe_interrupts(struct rmi_driver_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) struct rmi_device *rmi_dev = data->rmi_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) struct device *dev = &rmi_dev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) struct fwnode_handle *fwnode = rmi_dev->xport->dev->fwnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) int irq_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) * We need to count the IRQs and allocate their storage before scanning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) * the PDT and creating the function entries, because adding a new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) * function can trigger events that result in the IRQ related storage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) * being accessed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Counting IRQs.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) data->bootloader_mode = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_count_irqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) if (retval < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) dev_err(dev, "IRQ counting failed with code %d.\n", retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) if (data->bootloader_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) dev_warn(dev, "Device in bootloader mode.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) /* Allocate and register a linear revmap irq_domain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) data->irqdomain = irq_domain_create_linear(fwnode, irq_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) &irq_domain_simple_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) if (!data->irqdomain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) dev_err(&rmi_dev->dev, "Failed to create IRQ domain\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) data->irq_count = irq_count;
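/* Each F01 interrupt status/enable register covers 8 interrupt sources. */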
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) data->num_of_irq_regs = (data->irq_count + 7) / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) size = BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) data->irq_memory = devm_kcalloc(dev, size, 4, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) if (!data->irq_memory) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) dev_err(dev, "Failed to allocate memory for irq masks.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
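/*
 * Carve the single irq_memory allocation into four bitmaps of
 * irq_count bits each: the interrupt status, the per-function
 * interrupt bits, and the current and new interrupt enable masks.
 */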
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) data->irq_status = data->irq_memory + size * 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) data->fn_irq_bits = data->irq_memory + size * 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) data->current_irq_mask = data->irq_memory + size * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) data->new_irq_mask = data->irq_memory + size * 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) int rmi_init_functions(struct rmi_driver_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) struct rmi_device *rmi_dev = data->rmi_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) struct device *dev = &rmi_dev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) int irq_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Creating functions.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_create_function);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (retval < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) dev_err(dev, "Function creation failed with code %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) goto err_destroy_functions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) if (!data->f01_container) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) dev_err(dev, "Missing F01 container!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) retval = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) goto err_destroy_functions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) retval = rmi_read_block(rmi_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) data->f01_container->fd.control_base_addr + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) data->current_irq_mask, data->num_of_irq_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) if (retval < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) dev_err(dev, "%s: Failed to read current IRQ mask.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) goto err_destroy_functions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) err_destroy_functions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) rmi_free_function_list(rmi_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) static int rmi_driver_probe(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) struct rmi_driver *rmi_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) struct rmi_driver_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) struct rmi_device_platform_data *pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) struct rmi_device *rmi_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Starting probe.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) if (!rmi_is_physical_device(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) rmi_dbg(RMI_DEBUG_CORE, dev, "Not a physical device.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) rmi_dev = to_rmi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) rmi_driver = to_rmi_driver(dev->driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) rmi_dev->driver = rmi_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) pdata = rmi_get_platform_data(rmi_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) if (rmi_dev->xport->dev->of_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) retval = rmi_driver_of_probe(rmi_dev->xport->dev, pdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) data = devm_kzalloc(dev, sizeof(struct rmi_driver_data), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) INIT_LIST_HEAD(&data->function_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) data->rmi_dev = rmi_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) dev_set_drvdata(&rmi_dev->dev, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) * Right before a warm boot, the sensor might be in some unusual state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) * such as F54 diagnostics, or F34 bootloader mode after a firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) * or configuration update. In order to clear the sensor to a known
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) * state and/or apply any updates, we issue an initial reset to clear any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * previous settings and force it into normal operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) * We have to do this before actually building the PDT because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) * the reflash updates (if any) might cause various registers to move
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) * around.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) * For a number of reasons, this initial reset may fail to return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) * within the specified time, but we'll still be able to bring up the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) * driver normally after that failure. This occurs most commonly in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) * a cold boot situation (where the firmware takes longer to come up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) * than from a warm boot) and the reset_delay_ms in the platform data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) * has been set too short to accommodate that. Since the sensor will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) * eventually come up and be usable, we don't want to just fail here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) * and leave the customer's device unusable. So we warn them, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) * continue processing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) retval = rmi_scan_pdt(rmi_dev, NULL, rmi_initial_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if (retval < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) dev_warn(dev, "RMI initial reset failed! Continuing in spite of this.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) retval = rmi_read(rmi_dev, PDT_PROPERTIES_LOCATION, &data->pdt_props);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) if (retval < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) * Print a warning and continue, since failure to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) * the PDT properties is not a reason to fail the probe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) dev_warn(dev, "Could not read PDT properties from %#06x (code %d). Assuming 0x00.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) PDT_PROPERTIES_LOCATION, retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) mutex_init(&data->irq_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) mutex_init(&data->enabled_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) retval = rmi_probe_interrupts(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) if (rmi_dev->xport->input) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * The transport driver already has an input device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) * In some cases it is preferable to reuse the transport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * device's input device instead of creating a new one here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * One example is HID touchpads that report "pass-through"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) * button events which are not reported via the RMI registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) data->input = rmi_dev->xport->input;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) data->input = devm_input_allocate_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) if (!data->input) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) dev_err(dev, "%s: Failed to allocate input device.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) retval = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) rmi_driver_set_input_params(rmi_dev, data->input);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) data->input->phys = devm_kasprintf(dev, GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) "%s/input0", dev_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) retval = rmi_init_functions(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) retval = rmi_f34_create_sysfs(rmi_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if (data->input) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) rmi_driver_set_input_name(rmi_dev, data->input);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) if (!rmi_dev->xport->input) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) retval = input_register_device(data->input);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) if (retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) dev_err(dev, "%s: Failed to register input device.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) goto err_destroy_functions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) retval = rmi_irq_init(rmi_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) if (retval < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) goto err_destroy_functions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (data->f01_container->dev.driver) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) /* Driver already bound, so enable ATTN now. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) retval = rmi_enable_sensor(rmi_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) goto err_disable_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) err_disable_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) rmi_disable_irq(rmi_dev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) err_destroy_functions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) rmi_free_function_list(rmi_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) static struct rmi_driver rmi_physical_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) .name = "rmi4_physical",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) .bus = &rmi_bus_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) .probe = rmi_driver_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) .remove = rmi_driver_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) .reset_handler = rmi_driver_reset_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) .clear_irq_bits = rmi_driver_clear_irq_bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) .set_irq_bits = rmi_driver_set_irq_bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) .set_input_params = rmi_driver_set_input_params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) bool rmi_is_physical_driver(struct device_driver *drv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) return drv == &rmi_physical_driver.driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) int __init rmi_register_physical_driver(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) error = driver_register(&rmi_physical_driver.driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) pr_err("%s: driver register failed, code=%d.\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) void __exit rmi_unregister_physical_driver(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) driver_unregister(&rmi_physical_driver.driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }