// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic implementation of a polled input device
 *
 * Copyright (c) 2007 Dmitry Torokhov
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/input-polldev.h>

MODULE_AUTHOR("Dmitry Torokhov <dtor@mail.ru>");
MODULE_DESCRIPTION("Generic implementation of a polled input device");
MODULE_LICENSE("GPL v2");

static void input_polldev_queue_work(struct input_polled_dev *dev)
{
	unsigned long delay;

	delay = msecs_to_jiffies(dev->poll_interval);
	if (delay >= HZ)
		delay = round_jiffies_relative(delay);

	queue_delayed_work(system_freezable_wq, &dev->work, delay);
}

static void input_polled_device_work(struct work_struct *work)
{
	struct input_polled_dev *dev =
		container_of(work, struct input_polled_dev, work.work);

	dev->poll(dev);
	input_polldev_queue_work(dev);
}

static int input_open_polled_device(struct input_dev *input)
{
	struct input_polled_dev *dev = input_get_drvdata(input);

	if (dev->open)
		dev->open(dev);

	/* Only start polling if polling is enabled */
	if (dev->poll_interval > 0) {
		dev->poll(dev);
		input_polldev_queue_work(dev);
	}

	return 0;
}

static void input_close_polled_device(struct input_dev *input)
{
	struct input_polled_dev *dev = input_get_drvdata(input);

	cancel_delayed_work_sync(&dev->work);

	if (dev->close)
		dev->close(dev);
}
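
/*
 * Example: a driver may supply optional ->open() and ->close() hooks on
 * struct input_polled_dev; they are invoked from the open/close paths
 * above, before polling starts and after the poll work has been cancelled.
 * A minimal sketch (hypothetical driver names, not part of this file):
 *
 *	static void foo_open(struct input_polled_dev *poll_dev)
 *	{
 *		struct foo_priv *priv = poll_dev->private;
 *
 *		foo_power_on(priv);	// bring the hardware up before polling
 *	}
 *
 *	static void foo_close(struct input_polled_dev *poll_dev)
 *	{
 *		struct foo_priv *priv = poll_dev->private;
 *
 *		foo_power_off(priv);	// polling has already been stopped
 *	}
 */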

/* SYSFS interface */

static ssize_t input_polldev_get_poll(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct input_polled_dev *polldev = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", polldev->poll_interval);
}

static ssize_t input_polldev_set_poll(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct input_polled_dev *polldev = dev_get_drvdata(dev);
	struct input_dev *input = polldev->input;
	unsigned int interval;
	int err;

	err = kstrtouint(buf, 0, &interval);
	if (err)
		return err;

	if (interval < polldev->poll_interval_min)
		return -EINVAL;

	if (interval > polldev->poll_interval_max)
		return -EINVAL;

	mutex_lock(&input->mutex);

	polldev->poll_interval = interval;

	if (input->users) {
		cancel_delayed_work_sync(&polldev->work);
		if (polldev->poll_interval > 0)
			input_polldev_queue_work(polldev);
	}

	mutex_unlock(&input->mutex);

	return count;
}

static DEVICE_ATTR(poll, S_IRUGO | S_IWUSR, input_polldev_get_poll,
					    input_polldev_set_poll);

static ssize_t input_polldev_get_max(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct input_polled_dev *polldev = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", polldev->poll_interval_max);
}

static DEVICE_ATTR(max, S_IRUGO, input_polldev_get_max, NULL);

static ssize_t input_polldev_get_min(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct input_polled_dev *polldev = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", polldev->poll_interval_min);
}

static DEVICE_ATTR(min, S_IRUGO, input_polldev_get_min, NULL);

static struct attribute *sysfs_attrs[] = {
	&dev_attr_poll.attr,
	&dev_attr_max.attr,
	&dev_attr_min.attr,
	NULL
};

static struct attribute_group input_polldev_attribute_group = {
	.attrs = sysfs_attrs
};

static const struct attribute_group *input_polldev_attribute_groups[] = {
	&input_polldev_attribute_group,
	NULL
};
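
/*
 * Example: the "poll", "min" and "max" attributes above are created on the
 * registered input device. Writes to "poll" set a new interval in
 * milliseconds and are rejected with -EINVAL unless the value lies within
 * [poll_interval_min, poll_interval_max]. A driver that wants userspace to
 * be able to tune the rate could set the bounds before registering
 * (a sketch with assumed values, not taken from this file):
 *
 *	poll_dev->poll_interval = 100;		// default: poll every 100 ms
 *	poll_dev->poll_interval_min = 10;	// reject anything below 10 ms
 *	poll_dev->poll_interval_max = 1000;	// reject anything above 1 s
 *
 * With a zero minimum, writing 0 effectively pauses polling until a
 * non-zero interval is written again.
 */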

/**
 * input_allocate_polled_device - allocate memory for polled device
 *
 * The function allocates memory for a polled device and also
 * for an input device associated with this polled device.
 *
 * Returns prepared &struct input_polled_dev or %NULL on failure.
 */
struct input_polled_dev *input_allocate_polled_device(void)
{
	struct input_polled_dev *dev;

	dev = kzalloc(sizeof(struct input_polled_dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	dev->input = input_allocate_device();
	if (!dev->input) {
		kfree(dev);
		return NULL;
	}

	return dev;
}
EXPORT_SYMBOL(input_allocate_polled_device);
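
/*
 * Example: non-managed allocation is normally paired with
 * input_free_polled_device() on error paths and on driver removal.
 * A minimal error-handling sketch (hypothetical caller, not part of
 * this file):
 *
 *	struct input_polled_dev *poll_dev;
 *	int err;
 *
 *	poll_dev = input_allocate_polled_device();
 *	if (!poll_dev)
 *		return -ENOMEM;
 *	// set up poll(), intervals and the embedded input device here
 *	err = input_register_polled_device(poll_dev);
 *	if (err) {
 *		input_free_polled_device(poll_dev);
 *		return err;
 *	}
 */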

struct input_polled_devres {
	struct input_polled_dev *polldev;
};

static int devm_input_polldev_match(struct device *dev, void *res, void *data)
{
	struct input_polled_devres *devres = res;

	return devres->polldev == data;
}

static void devm_input_polldev_release(struct device *dev, void *res)
{
	struct input_polled_devres *devres = res;
	struct input_polled_dev *polldev = devres->polldev;

	dev_dbg(dev, "%s: dropping reference/freeing %s\n",
		__func__, dev_name(&polldev->input->dev));

	input_put_device(polldev->input);
	kfree(polldev);
}

static void devm_input_polldev_unregister(struct device *dev, void *res)
{
	struct input_polled_devres *devres = res;
	struct input_polled_dev *polldev = devres->polldev;

	dev_dbg(dev, "%s: unregistering device %s\n",
		__func__, dev_name(&polldev->input->dev));
	input_unregister_device(polldev->input);

	/*
	 * Note that we are still holding an extra reference to the input
	 * device so it will stick around until devm_input_polldev_release()
	 * is called.
	 */
}

/**
 * devm_input_allocate_polled_device - allocate managed polled device
 * @dev: device owning the polled device being created
 *
 * Returns prepared &struct input_polled_dev or %NULL.
 *
 * Managed polled input devices do not need to be explicitly unregistered
 * or freed as it will be done automatically when the owner device unbinds
 * from its driver (or binding fails). Once such a managed polled device
 * is allocated, it is ready to be set up and registered in the same
 * fashion as regular polled input devices (using
 * input_register_polled_device() function).
 *
 * If you want to manually unregister and free such managed polled devices,
 * it can still be done by calling input_unregister_polled_device() and
 * input_free_polled_device(), although it is rarely needed.
 *
 * NOTE: the owner device is set up as the parent of the input device and
 * users should not override it.
 */
struct input_polled_dev *devm_input_allocate_polled_device(struct device *dev)
{
	struct input_polled_dev *polldev;
	struct input_polled_devres *devres;

	devres = devres_alloc(devm_input_polldev_release, sizeof(*devres),
			      GFP_KERNEL);
	if (!devres)
		return NULL;

	polldev = input_allocate_polled_device();
	if (!polldev) {
		devres_free(devres);
		return NULL;
	}

	polldev->input->dev.parent = dev;
	polldev->devres_managed = true;

	devres->polldev = polldev;
	devres_add(dev, devres);

	return polldev;
}
EXPORT_SYMBOL(devm_input_allocate_polled_device);
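
/*
 * Example: with the managed variant, neither unregistration nor freeing is
 * needed in the driver's remove path. A minimal probe() sketch
 * (hypothetical driver names and a platform device parent are assumed,
 * not part of this file):
 *
 *	poll_dev = devm_input_allocate_polled_device(&pdev->dev);
 *	if (!poll_dev)
 *		return -ENOMEM;
 *
 *	poll_dev->poll = foo_poll;
 *	poll_dev->poll_interval = 50;
 *	// set up poll_dev->input (name, id, capabilities) here
 *
 *	return input_register_polled_device(poll_dev);
 */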

/**
 * input_free_polled_device - free memory allocated for polled device
 * @dev: device to free
 *
 * The function frees memory allocated for the polled device and drops
 * the reference to the associated input device.
 */
void input_free_polled_device(struct input_polled_dev *dev)
{
	if (dev) {
		if (dev->devres_managed)
			WARN_ON(devres_destroy(dev->input->dev.parent,
					       devm_input_polldev_release,
					       devm_input_polldev_match,
					       dev));
		input_put_device(dev->input);
		kfree(dev);
	}
}
EXPORT_SYMBOL(input_free_polled_device);

/**
 * input_register_polled_device - register polled device
 * @dev: device to register
 *
 * The function registers a previously initialized polled input device
 * with the input layer. The device should be allocated with a call to
 * input_allocate_polled_device(). Callers should also set up the poll()
 * method and set up the capabilities (id, name, phys, bits) of the
 * corresponding input_dev structure.
 */
int input_register_polled_device(struct input_polled_dev *dev)
{
	struct input_polled_devres *devres = NULL;
	struct input_dev *input = dev->input;
	int error;

	if (dev->devres_managed) {
		devres = devres_alloc(devm_input_polldev_unregister,
				      sizeof(*devres), GFP_KERNEL);
		if (!devres)
			return -ENOMEM;

		devres->polldev = dev;
	}

	input_set_drvdata(input, dev);
	INIT_DELAYED_WORK(&dev->work, input_polled_device_work);

	if (!dev->poll_interval)
		dev->poll_interval = 500;
	if (!dev->poll_interval_max)
		dev->poll_interval_max = dev->poll_interval;

	input->open = input_open_polled_device;
	input->close = input_close_polled_device;

	input->dev.groups = input_polldev_attribute_groups;

	error = input_register_device(input);
	if (error) {
		devres_free(devres);
		return error;
	}

	/*
	 * Take an extra reference to the underlying input device so
	 * that it survives the call to input_unregister_polled_device()
	 * and is deleted only after input_free_polled_device()
	 * has been invoked. This is needed to ease the task of freeing
	 * sparse keymaps.
	 */
	input_get_device(input);

	if (dev->devres_managed) {
		dev_dbg(input->dev.parent, "%s: registering %s with devres.\n",
			__func__, dev_name(&input->dev));
		devres_add(input->dev.parent, devres);
	}

	return 0;
}
EXPORT_SYMBOL(input_register_polled_device);
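
/*
 * Example: before registration a driver fills in the poll() callback, an
 * optional interval (500 ms is used as the default above) and the
 * capabilities of the embedded input device. A minimal sketch (hypothetical
 * driver names and event codes, not part of this file):
 *
 *	poll_dev->poll = foo_poll;
 *	poll_dev->private = priv;
 *	poll_dev->poll_interval = 100;
 *
 *	input = poll_dev->input;
 *	input->name = "Foo polled button";
 *	input->id.bustype = BUS_HOST;
 *	input_set_capability(input, EV_KEY, KEY_POWER);
 *
 *	err = input_register_polled_device(poll_dev);
 *	if (err)
 *		goto err_free_polldev;
 */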

/**
 * input_unregister_polled_device - unregister polled device
 * @dev: device to unregister
 *
 * The function unregisters a previously registered polled input
 * device from the input layer. Polling is stopped and the device is
 * ready to be freed with a call to input_free_polled_device().
 */
void input_unregister_polled_device(struct input_polled_dev *dev)
{
	if (dev->devres_managed)
		WARN_ON(devres_destroy(dev->input->dev.parent,
				       devm_input_polldev_unregister,
				       devm_input_polldev_match,
				       dev));

	input_unregister_device(dev->input);
}
EXPORT_SYMBOL(input_unregister_polled_device);
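
/*
 * Example: a driver using the non-managed API typically pairs the two calls
 * in its remove path (sketch only, with hypothetical names; drivers using
 * devm_input_allocate_polled_device() need neither call):
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		struct foo_priv *priv = platform_get_drvdata(pdev);
 *
 *		input_unregister_polled_device(priv->poll_dev);
 *		input_free_polled_device(priv->poll_dev);
 *		return 0;
 *	}
 */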