// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/uio/uio.c
 *
 * Copyright(C) 2005, Benedikt Spranger <b.spranger@linutronix.de>
 * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2006, Hans J. Koch <hjk@hansjkoch.de>
 * Copyright(C) 2006, Greg Kroah-Hartman <greg@kroah.com>
 *
 * Userspace IO
 *
 * Base Functions
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/idr.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/kobject.h>
#include <linux/cdev.h>
#include <linux/uio_driver.h>

#define UIO_MAX_DEVICES		(1U << MINORBITS)

static int uio_major;
static struct cdev *uio_cdev;
static DEFINE_IDR(uio_idr);
static const struct file_operations uio_fops;

/* Protect idr accesses */
static DEFINE_MUTEX(minor_lock);

/*
 * attributes
 */

struct uio_map {
	struct kobject kobj;
	struct uio_mem *mem;
};
#define to_map(map) container_of(map, struct uio_map, kobj)

static ssize_t map_name_show(struct uio_mem *mem, char *buf)
{
	if (unlikely(!mem->name))
		mem->name = "";

	return sprintf(buf, "%s\n", mem->name);
}

static ssize_t map_addr_show(struct uio_mem *mem, char *buf)
{
	return sprintf(buf, "%pa\n", &mem->addr);
}

static ssize_t map_size_show(struct uio_mem *mem, char *buf)
{
	return sprintf(buf, "%pa\n", &mem->size);
}

static ssize_t map_offset_show(struct uio_mem *mem, char *buf)
{
	return sprintf(buf, "0x%llx\n", (unsigned long long)mem->offs);
}

struct map_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct uio_mem *, char *);
	ssize_t (*store)(struct uio_mem *, const char *, size_t);
};

static struct map_sysfs_entry name_attribute =
	__ATTR(name, S_IRUGO, map_name_show, NULL);
static struct map_sysfs_entry addr_attribute =
	__ATTR(addr, S_IRUGO, map_addr_show, NULL);
static struct map_sysfs_entry size_attribute =
	__ATTR(size, S_IRUGO, map_size_show, NULL);
static struct map_sysfs_entry offset_attribute =
	__ATTR(offset, S_IRUGO, map_offset_show, NULL);

static struct attribute *attrs[] = {
	&name_attribute.attr,
	&addr_attribute.attr,
	&size_attribute.attr,
	&offset_attribute.attr,
	NULL,	/* need to NULL terminate the list of attributes */
};

static void map_release(struct kobject *kobj)
{
	struct uio_map *map = to_map(kobj);
	kfree(map);
}

static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct uio_map *map = to_map(kobj);
	struct uio_mem *mem = map->mem;
	struct map_sysfs_entry *entry;

	entry = container_of(attr, struct map_sysfs_entry, attr);

	if (!entry->show)
		return -EIO;

	return entry->show(mem, buf);
}

static const struct sysfs_ops map_sysfs_ops = {
	.show = map_type_show,
};

static struct kobj_type map_attr_type = {
	.release = map_release,
	.sysfs_ops = &map_sysfs_ops,
	.default_attrs = attrs,
};

struct uio_portio {
	struct kobject kobj;
	struct uio_port *port;
};
#define to_portio(portio) container_of(portio, struct uio_portio, kobj)

static ssize_t portio_name_show(struct uio_port *port, char *buf)
{
	if (unlikely(!port->name))
		port->name = "";

	return sprintf(buf, "%s\n", port->name);
}

static ssize_t portio_start_show(struct uio_port *port, char *buf)
{
	return sprintf(buf, "0x%lx\n", port->start);
}

static ssize_t portio_size_show(struct uio_port *port, char *buf)
{
	return sprintf(buf, "0x%lx\n", port->size);
}

static ssize_t portio_porttype_show(struct uio_port *port, char *buf)
{
	const char *porttypes[] = {"none", "x86", "gpio", "other"};

	if ((port->porttype < 0) || (port->porttype > UIO_PORT_OTHER))
		return -EINVAL;

	return sprintf(buf, "port_%s\n", porttypes[port->porttype]);
}

struct portio_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct uio_port *, char *);
	ssize_t (*store)(struct uio_port *, const char *, size_t);
};

static struct portio_sysfs_entry portio_name_attribute =
	__ATTR(name, S_IRUGO, portio_name_show, NULL);
static struct portio_sysfs_entry portio_start_attribute =
	__ATTR(start, S_IRUGO, portio_start_show, NULL);
static struct portio_sysfs_entry portio_size_attribute =
	__ATTR(size, S_IRUGO, portio_size_show, NULL);
static struct portio_sysfs_entry portio_porttype_attribute =
	__ATTR(porttype, S_IRUGO, portio_porttype_show, NULL);

static struct attribute *portio_attrs[] = {
	&portio_name_attribute.attr,
	&portio_start_attribute.attr,
	&portio_size_attribute.attr,
	&portio_porttype_attribute.attr,
	NULL,
};

static void portio_release(struct kobject *kobj)
{
	struct uio_portio *portio = to_portio(kobj);
	kfree(portio);
}

static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr,
				char *buf)
{
	struct uio_portio *portio = to_portio(kobj);
	struct uio_port *port = portio->port;
	struct portio_sysfs_entry *entry;

	entry = container_of(attr, struct portio_sysfs_entry, attr);

	if (!entry->show)
		return -EIO;

	return entry->show(port, buf);
}

static const struct sysfs_ops portio_sysfs_ops = {
	.show = portio_type_show,
};

static struct kobj_type portio_attr_type = {
	.release = portio_release,
	.sysfs_ops = &portio_sysfs_ops,
	.default_attrs = portio_attrs,
};

static ssize_t name_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct uio_device *idev = dev_get_drvdata(dev);
	int ret;

	mutex_lock(&idev->info_lock);
	if (!idev->info) {
		ret = -EINVAL;
		dev_err(dev, "the device has been unregistered\n");
		goto out;
	}

	ret = sprintf(buf, "%s\n", idev->info->name);

out:
	mutex_unlock(&idev->info_lock);
	return ret;
}
static DEVICE_ATTR_RO(name);

static ssize_t version_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct uio_device *idev = dev_get_drvdata(dev);
	int ret;

	mutex_lock(&idev->info_lock);
	if (!idev->info) {
		ret = -EINVAL;
		dev_err(dev, "the device has been unregistered\n");
		goto out;
	}

	ret = sprintf(buf, "%s\n", idev->info->version);

out:
	mutex_unlock(&idev->info_lock);
	return ret;
}
static DEVICE_ATTR_RO(version);

static ssize_t event_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct uio_device *idev = dev_get_drvdata(dev);
	return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
}
static DEVICE_ATTR_RO(event);

static struct attribute *uio_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_version.attr,
	&dev_attr_event.attr,
	NULL,
};
ATTRIBUTE_GROUPS(uio);

/* UIO class infrastructure */
static struct class uio_class = {
	.name = "uio",
	.dev_groups = uio_groups,
};

static bool uio_class_registered;

/*
 * device functions
 */
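/*
 * Create the per-device sysfs hierarchy: a "maps/mapN" kobject for each
 * memory region and a "portio/portN" kobject for each port region, so
 * userspace can discover the attributes defined above.
 */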
static int uio_dev_add_attributes(struct uio_device *idev)
{
	int ret;
	int mi, pi;
	int map_found = 0;
	int portio_found = 0;
	struct uio_mem *mem;
	struct uio_map *map;
	struct uio_port *port;
	struct uio_portio *portio;

	for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
		mem = &idev->info->mem[mi];
		if (mem->size == 0)
			break;
		if (!map_found) {
			map_found = 1;
			idev->map_dir = kobject_create_and_add("maps",
							&idev->dev.kobj);
			if (!idev->map_dir) {
				ret = -ENOMEM;
				goto err_map;
			}
		}
		map = kzalloc(sizeof(*map), GFP_KERNEL);
		if (!map) {
			ret = -ENOMEM;
			goto err_map;
		}
		kobject_init(&map->kobj, &map_attr_type);
		map->mem = mem;
		mem->map = map;
		ret = kobject_add(&map->kobj, idev->map_dir, "map%d", mi);
		if (ret)
			goto err_map_kobj;
		ret = kobject_uevent(&map->kobj, KOBJ_ADD);
		if (ret)
			goto err_map_kobj;
	}

	for (pi = 0; pi < MAX_UIO_PORT_REGIONS; pi++) {
		port = &idev->info->port[pi];
		if (port->size == 0)
			break;
		if (!portio_found) {
			portio_found = 1;
			idev->portio_dir = kobject_create_and_add("portio",
							&idev->dev.kobj);
			if (!idev->portio_dir) {
				ret = -ENOMEM;
				goto err_portio;
			}
		}
		portio = kzalloc(sizeof(*portio), GFP_KERNEL);
		if (!portio) {
			ret = -ENOMEM;
			goto err_portio;
		}
		kobject_init(&portio->kobj, &portio_attr_type);
		portio->port = port;
		port->portio = portio;
		ret = kobject_add(&portio->kobj, idev->portio_dir,
							"port%d", pi);
		if (ret)
			goto err_portio_kobj;
		ret = kobject_uevent(&portio->kobj, KOBJ_ADD);
		if (ret)
			goto err_portio_kobj;
	}

	return 0;

err_portio:
	pi--;
err_portio_kobj:
	for (; pi >= 0; pi--) {
		port = &idev->info->port[pi];
		portio = port->portio;
		kobject_put(&portio->kobj);
	}
	kobject_put(idev->portio_dir);
err_map:
	mi--;
err_map_kobj:
	for (; mi >= 0; mi--) {
		mem = &idev->info->mem[mi];
		map = mem->map;
		kobject_put(&map->kobj);
	}
	kobject_put(idev->map_dir);
	dev_err(&idev->dev, "error creating sysfs files (%d)\n", ret);
	return ret;
}

static void uio_dev_del_attributes(struct uio_device *idev)
{
	int i;
	struct uio_mem *mem;
	struct uio_port *port;

	for (i = 0; i < MAX_UIO_MAPS; i++) {
		mem = &idev->info->mem[i];
		if (mem->size == 0)
			break;
		kobject_put(&mem->map->kobj);
	}
	kobject_put(idev->map_dir);

	for (i = 0; i < MAX_UIO_PORT_REGIONS; i++) {
		port = &idev->info->port[i];
		if (port->size == 0)
			break;
		kobject_put(&port->portio->kobj);
	}
	kobject_put(idev->portio_dir);
}

static int uio_get_minor(struct uio_device *idev)
{
	int retval;

	mutex_lock(&minor_lock);
	retval = idr_alloc(&uio_idr, idev, 0, UIO_MAX_DEVICES, GFP_KERNEL);
	if (retval >= 0) {
		idev->minor = retval;
		retval = 0;
	} else if (retval == -ENOSPC) {
		dev_err(&idev->dev, "too many uio devices\n");
		retval = -EINVAL;
	}
	mutex_unlock(&minor_lock);
	return retval;
}

static void uio_free_minor(unsigned long minor)
{
	mutex_lock(&minor_lock);
	idr_remove(&uio_idr, minor);
	mutex_unlock(&minor_lock);
}

/**
 * uio_event_notify - trigger an interrupt event
 * @info: UIO device capabilities
 */
void uio_event_notify(struct uio_info *info)
{
	struct uio_device *idev = info->uio_dev;

	atomic_inc(&idev->event);
	wake_up_interruptible(&idev->wait);
	kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
}
EXPORT_SYMBOL_GPL(uio_event_notify);

/**
 * uio_interrupt - hardware interrupt handler
 * @irq: IRQ number, can be UIO_IRQ_CYCLIC for cyclic timer
 * @dev_id: Pointer to the device's uio_device structure
 */
static irqreturn_t uio_interrupt(int irq, void *dev_id)
{
	struct uio_device *idev = (struct uio_device *)dev_id;
	irqreturn_t ret;

	ret = idev->info->handler(irq, idev->info);
	if (ret == IRQ_HANDLED)
		uio_event_notify(idev->info);

	return ret;
}

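/*
 * Per-open-file state: each reader of /dev/uioX remembers which interrupt
 * count it has already consumed, so concurrent listeners see events
 * independently of each other.
 */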
struct uio_listener {
	struct uio_device *dev;
	s32 event_count;
};

static int uio_open(struct inode *inode, struct file *filep)
{
	struct uio_device *idev;
	struct uio_listener *listener;
	int ret = 0;

	mutex_lock(&minor_lock);
	idev = idr_find(&uio_idr, iminor(inode));
	mutex_unlock(&minor_lock);
	if (!idev) {
		ret = -ENODEV;
		goto out;
	}

	get_device(&idev->dev);

	if (!try_module_get(idev->owner)) {
		ret = -ENODEV;
		goto err_module_get;
	}

	listener = kmalloc(sizeof(*listener), GFP_KERNEL);
	if (!listener) {
		ret = -ENOMEM;
		goto err_alloc_listener;
	}

	listener->dev = idev;
	listener->event_count = atomic_read(&idev->event);
	filep->private_data = listener;

	mutex_lock(&idev->info_lock);
	if (!idev->info) {
		mutex_unlock(&idev->info_lock);
		ret = -EINVAL;
		goto err_infoopen;
	}

	if (idev->info->open)
		ret = idev->info->open(idev->info, inode);
	mutex_unlock(&idev->info_lock);
	if (ret)
		goto err_infoopen;

	return 0;

err_infoopen:
	kfree(listener);

err_alloc_listener:
	module_put(idev->owner);

err_module_get:
	put_device(&idev->dev);

out:
	return ret;
}

static int uio_fasync(int fd, struct file *filep, int on)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;

	return fasync_helper(fd, filep, on, &idev->async_queue);
}

static int uio_release(struct inode *inode, struct file *filep)
{
	int ret = 0;
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;

	mutex_lock(&idev->info_lock);
	if (idev->info && idev->info->release)
		ret = idev->info->release(idev->info, inode);
	mutex_unlock(&idev->info_lock);

	module_put(idev->owner);
	kfree(listener);
	put_device(&idev->dev);
	return ret;
}

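/*
 * Poll reports the device readable once the interrupt counter has advanced
 * past the count this listener last read.
 */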
static __poll_t uio_poll(struct file *filep, poll_table *wait)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;
	__poll_t ret = 0;

	mutex_lock(&idev->info_lock);
	if (!idev->info || !idev->info->irq)
		ret = -EIO;
	mutex_unlock(&idev->info_lock);

	if (ret)
		return ret;

	poll_wait(filep, &idev->wait, wait);
	if (listener->event_count != atomic_read(&idev->event))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}

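/*
 * Reading /dev/uioX blocks (unless O_NONBLOCK) until a new interrupt has
 * been counted, then returns the current event count as a single s32.
 */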
static ssize_t uio_read(struct file *filep, char __user *buf,
			size_t count, loff_t *ppos)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t retval = 0;
	s32 event_count;

	if (count != sizeof(s32))
		return -EINVAL;

	add_wait_queue(&idev->wait, &wait);

	do {
		mutex_lock(&idev->info_lock);
		if (!idev->info || !idev->info->irq) {
			retval = -EIO;
			mutex_unlock(&idev->info_lock);
			break;
		}
		mutex_unlock(&idev->info_lock);

		set_current_state(TASK_INTERRUPTIBLE);

		event_count = atomic_read(&idev->event);
		if (event_count != listener->event_count) {
			__set_current_state(TASK_RUNNING);
			if (copy_to_user(buf, &event_count, count))
				retval = -EFAULT;
			else {
				listener->event_count = event_count;
				retval = count;
			}
			break;
		}

		if (filep->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		schedule();
	} while (1);

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&idev->wait, &wait);

	return retval;
}

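/*
 * Writing a single s32 to /dev/uioX is forwarded to the driver's
 * irqcontrol() hook, which is typically used to enable (1) or
 * disable (0) the interrupt.
 */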
static ssize_t uio_write(struct file *filep, const char __user *buf,
			size_t count, loff_t *ppos)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;
	ssize_t retval;
	s32 irq_on;

	if (count != sizeof(s32))
		return -EINVAL;

	if (copy_from_user(&irq_on, buf, count))
		return -EFAULT;

	mutex_lock(&idev->info_lock);
	if (!idev->info) {
		retval = -EINVAL;
		goto out;
	}

	if (!idev->info->irq) {
		retval = -EIO;
		goto out;
	}

	if (!idev->info->irqcontrol) {
		retval = -ENOSYS;
		goto out;
	}

	retval = idev->info->irqcontrol(idev->info, irq_on);

out:
	mutex_unlock(&idev->info_lock);
	return retval ? retval : sizeof(s32);
}

static int uio_find_mem_index(struct vm_area_struct *vma)
{
	struct uio_device *idev = vma->vm_private_data;

	if (vma->vm_pgoff < MAX_UIO_MAPS) {
		if (idev->info->mem[vma->vm_pgoff].size == 0)
			return -1;
		return (int)vma->vm_pgoff;
	}
	return -1;
}

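/*
 * Fault handler for logical/virtual memory regions: translate the fault
 * offset within the selected mapping into the backing page and hand it
 * to the VM.
 */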
static vm_fault_t uio_vma_fault(struct vm_fault *vmf)
{
	struct uio_device *idev = vmf->vma->vm_private_data;
	struct page *page;
	unsigned long offset;
	void *addr;
	vm_fault_t ret = 0;
	int mi;

	mutex_lock(&idev->info_lock);
	if (!idev->info) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	mi = uio_find_mem_index(vmf->vma);
	if (mi < 0) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	/*
	 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
	 * to use mem[N].
	 */
	offset = (vmf->pgoff - mi) << PAGE_SHIFT;

	addr = (void *)(unsigned long)idev->info->mem[mi].addr + offset;
	if (idev->info->mem[mi].memtype == UIO_MEM_LOGICAL)
		page = virt_to_page(addr);
	else
		page = vmalloc_to_page(addr);
	get_page(page);
	vmf->page = page;

out:
	mutex_unlock(&idev->info_lock);

	return ret;
}

static const struct vm_operations_struct uio_logical_vm_ops = {
	.fault = uio_vma_fault,
};

static int uio_mmap_logical(struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &uio_logical_vm_ops;
	return 0;
}

static const struct vm_operations_struct uio_physical_vm_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys,
#endif
};

static int uio_mmap_physical(struct vm_area_struct *vma)
{
	struct uio_device *idev = vma->vm_private_data;
	int mi = uio_find_mem_index(vma);
	struct uio_mem *mem;

	if (mi < 0)
		return -EINVAL;
	mem = idev->info->mem + mi;

	if (mem->addr & ~PAGE_MASK)
		return -ENODEV;
	if (vma->vm_end - vma->vm_start > mem->size)
		return -EINVAL;

	vma->vm_ops = &uio_physical_vm_ops;
	if (idev->info->mem[mi].memtype == UIO_MEM_PHYS)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/*
	 * We cannot use the vm_iomap_memory() helper here,
	 * because vma->vm_pgoff is the map index we looked
	 * up above in uio_find_mem_index(), rather than an
	 * actual page offset into the mmap.
	 *
	 * So we just do the physical mmap without a page
	 * offset.
	 */
	return remap_pfn_range(vma,
			       vma->vm_start,
			       mem->addr >> PAGE_SHIFT,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;
	int mi;
	unsigned long requested_pages, actual_pages;
	int ret = 0;

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;

	vma->vm_private_data = idev;

	mutex_lock(&idev->info_lock);
	if (!idev->info) {
		ret = -EINVAL;
		goto out;
	}

	mi = uio_find_mem_index(vma);
	if (mi < 0) {
		ret = -EINVAL;
		goto out;
	}

	requested_pages = vma_pages(vma);
	actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK)
			+ idev->info->mem[mi].size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (requested_pages > actual_pages) {
		ret = -EINVAL;
		goto out;
	}

	if (idev->info->mmap) {
		ret = idev->info->mmap(idev->info, vma);
		goto out;
	}

	switch (idev->info->mem[mi].memtype) {
	case UIO_MEM_IOVA:
	case UIO_MEM_PHYS:
		ret = uio_mmap_physical(vma);
		break;
	case UIO_MEM_LOGICAL:
	case UIO_MEM_VIRTUAL:
		ret = uio_mmap_logical(vma);
		break;
	default:
		ret = -EINVAL;
	}

out:
	mutex_unlock(&idev->info_lock);
	return ret;
}

static const struct file_operations uio_fops = {
	.owner = THIS_MODULE,
	.open = uio_open,
	.release = uio_release,
	.read = uio_read,
	.write = uio_write,
	.mmap = uio_mmap,
	.poll = uio_poll,
	.fasync = uio_fasync,
	.llseek = noop_llseek,
};

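/*
 * Reserve a minor range for all UIO devices and register one shared
 * character device covering it.
 */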
static int uio_major_init(void)
{
	static const char name[] = "uio";
	struct cdev *cdev = NULL;
	dev_t uio_dev = 0;
	int result;

	result = alloc_chrdev_region(&uio_dev, 0, UIO_MAX_DEVICES, name);
	if (result)
		goto out;

	result = -ENOMEM;
	cdev = cdev_alloc();
	if (!cdev)
		goto out_unregister;

	cdev->owner = THIS_MODULE;
	cdev->ops = &uio_fops;
	kobject_set_name(&cdev->kobj, "%s", name);

	result = cdev_add(cdev, uio_dev, UIO_MAX_DEVICES);
	if (result)
		goto out_put;

	uio_major = MAJOR(uio_dev);
	uio_cdev = cdev;
	return 0;
out_put:
	kobject_put(&cdev->kobj);
out_unregister:
	unregister_chrdev_region(uio_dev, UIO_MAX_DEVICES);
out:
	return result;
}

static void uio_major_cleanup(void)
{
	unregister_chrdev_region(MKDEV(uio_major, 0), UIO_MAX_DEVICES);
	cdev_del(uio_cdev);
}

static int init_uio_class(void)
{
	int ret;

	/* This is the first time in here, set everything up properly */
	ret = uio_major_init();
	if (ret)
		goto exit;

	ret = class_register(&uio_class);
	if (ret) {
		printk(KERN_ERR "class_register failed for uio\n");
		goto err_class_register;
	}

	uio_class_registered = true;

	return 0;

err_class_register:
	uio_major_cleanup();
exit:
	return ret;
}

static void release_uio_class(void)
{
	uio_class_registered = false;
	class_unregister(&uio_class);
	uio_major_cleanup();
}

static void uio_device_release(struct device *dev)
{
	struct uio_device *idev = dev_get_drvdata(dev);

	kfree(idev);
}

/**
 * uio_register_device - register a new userspace IO device
 * @owner:	module that creates the new device
 * @parent:	parent device
 * @info:	UIO device capabilities
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * returns zero on success or a negative error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) int __uio_register_device(struct module *owner,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) struct device *parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) struct uio_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) struct uio_device *idev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) if (!uio_class_registered)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) return -EPROBE_DEFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) if (!parent || !info || !info->name || !info->version)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) info->uio_dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) idev = kzalloc(sizeof(*idev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (!idev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) idev->owner = owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) idev->info = info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) mutex_init(&idev->info_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) init_waitqueue_head(&idev->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) atomic_set(&idev->event, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) ret = uio_get_minor(idev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) kfree(idev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) device_initialize(&idev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) idev->dev.devt = MKDEV(uio_major, idev->minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) idev->dev.class = &uio_class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) idev->dev.parent = parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) idev->dev.release = uio_device_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) dev_set_drvdata(&idev->dev, idev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) ret = dev_set_name(&idev->dev, "uio%d", idev->minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) goto err_device_create;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) ret = device_add(&idev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) goto err_device_create;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) ret = uio_dev_add_attributes(idev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) goto err_uio_dev_add_attributes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) info->uio_dev = idev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) * Note that we deliberately don't use devm_request_irq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * here. The parent module can unregister the UIO device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * and call pci_disable_msi, which requires that this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) * irq has been freed. However, the device may have open
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) * FDs at the time of unregister and therefore may not be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * freed until they are released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) ret = request_irq(info->irq, uio_interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) info->irq_flags, info->name, idev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) info->uio_dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) goto err_request_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) err_request_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) uio_dev_del_attributes(idev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) err_uio_dev_add_attributes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) device_del(&idev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) err_device_create:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) uio_free_minor(idev->minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) put_device(&idev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) EXPORT_SYMBOL_GPL(__uio_register_device);
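
/*
 * Registration sketch (illustrative only): a typical caller fills in a
 * struct uio_info and registers it against its parent device from its
 * probe() routine, usually via the uio_register_device() wrapper that
 * passes THIS_MODULE as the owner.  "my_uio", the resource pointer res
 * and MY_IRQ below are assumptions for the example; real drivers take
 * them from their bus.
 *
 *	static struct uio_info my_info = {
 *		.name      = "my_uio",
 *		.version   = "0.1",
 *		.irq       = MY_IRQ,
 *		.irq_flags = IRQF_SHARED,
 *	};
 *
 *	my_info.mem[0].name    = "registers";
 *	my_info.mem[0].addr    = res->start;
 *	my_info.mem[0].size    = resource_size(res);
 *	my_info.mem[0].memtype = UIO_MEM_PHYS;
 *
 *	ret = uio_register_device(&pdev->dev, &my_info);
 */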
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) static void devm_uio_unregister_device(struct device *dev, void *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) uio_unregister_device(*(struct uio_info **)res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) * devm_uio_register_device - Resource managed uio_register_device()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) * @owner: module that creates the new device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) * @parent: parent device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) * @info: UIO device capabilities
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) * returns zero on success or a negative error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) int __devm_uio_register_device(struct module *owner,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) struct device *parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) struct uio_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) struct uio_info **ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) ptr = devres_alloc(devm_uio_unregister_device, sizeof(*ptr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) if (!ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) *ptr = info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) ret = __uio_register_device(owner, parent, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) devres_free(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) devres_add(parent, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) EXPORT_SYMBOL_GPL(__devm_uio_register_device);
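
/*
 * Usage sketch (illustrative only): with the resource-managed wrapper,
 * devm_uio_register_device(), the UIO device is unregistered
 * automatically when the parent device is unbound, so no explicit
 * uio_unregister_device() call is needed.  my_probe() and my_info are
 * assumed names for the example.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		...fill in my_info as in the example above...
 *		return devm_uio_register_device(&pdev->dev, &my_info);
 *	}
 */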
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) * uio_unregister_device - unregister a userspace IO device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * @info: UIO device capabilities
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) void uio_unregister_device(struct uio_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) struct uio_device *idev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) unsigned long minor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) if (!info || !info->uio_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) idev = info->uio_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) minor = idev->minor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) mutex_lock(&idev->info_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) uio_dev_del_attributes(idev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (info->irq && info->irq != UIO_IRQ_CUSTOM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) free_irq(info->irq, idev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) idev->info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) mutex_unlock(&idev->info_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) wake_up_interruptible(&idev->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) kill_fasync(&idev->async_queue, SIGIO, POLL_HUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) device_unregister(&idev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) uio_free_minor(minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) EXPORT_SYMBOL_GPL(uio_unregister_device);
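
/*
 * Usage sketch (illustrative only): drivers that registered with
 * uio_register_device() pair it with this call on their remove path,
 * before releasing the interrupt source or the memory behind the
 * uio_info mappings.  my_remove() and my_info are assumed names.
 *
 *	static int my_remove(struct platform_device *pdev)
 *	{
 *		uio_unregister_device(&my_info);
 *		return 0;
 *	}
 */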
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) static int __init uio_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) return init_uio_class();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) static void __exit uio_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) release_uio_class();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) idr_destroy(&uio_idr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) module_init(uio_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) module_exit(uio_exit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) MODULE_LICENSE("GPL v2");