// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * User-space I/O driver support for HID subsystem
 * Copyright (c) 2012 David Herrmann
 */

#include <linux/atomic.h>
#include <linux/compat.h>
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/hid.h>
#include <linux/input.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/uhid.h>
#include <linux/wait.h>

#define UHID_NAME	"uhid"
#define UHID_BUFSIZE	32

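/*
 * Per-open-file state of a uhid character device. One uhid_device backs at
 * most one hid_device; @devlock serializes requests coming in through the
 * char-device file operations, @qlock protects the event queue towards
 * userspace and the report-reply bookkeeping.
 */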
struct uhid_device {
	struct mutex devlock;

	/* This flag tracks whether the HID device is usable for commands from
	 * userspace. The flag is already set before hid_add_device(), which
	 * runs in workqueue context, to allow hid_add_device() to communicate
	 * with userspace.
	 * However, if hid_add_device() fails, the flag is cleared without
	 * holding devlock.
	 * We guarantee that if @running changes from true to false while you're
	 * holding @devlock, it's still fine to access @hid.
	 */
	bool running;

	__u8 *rd_data;
	uint rd_size;

	/* When this is NULL, userspace may use UHID_CREATE/UHID_CREATE2. */
	struct hid_device *hid;
	struct uhid_event input_buf;

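	/* Event queue towards userspace: a fixed-size ring of heap-allocated
	 * events. @head, @tail and @outq are protected by @qlock; readers
	 * sleep on @waitq until the ring becomes non-empty.
	 */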
	wait_queue_head_t waitq;
	spinlock_t qlock;
	__u8 head;
	__u8 tail;
	struct uhid_event *outq[UHID_BUFSIZE];

	/* blocking GET_REPORT support; state changes protected by qlock */
	struct mutex report_lock;
	wait_queue_head_t report_wait;
	bool report_running;
	u32 report_id;
	u32 report_type;
	struct uhid_event report_buf;
	struct work_struct worker;
};

static struct miscdevice uhid_misc;

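/*
 * hid_add_device() is called from this worker rather than directly from
 * uhid_char_write() so that HID drivers may issue GET/SET_REPORT requests
 * from their ->probe() callbacks without being blocked on @devlock.
 */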
static void uhid_device_add_worker(struct work_struct *work)
{
	struct uhid_device *uhid = container_of(work, struct uhid_device, worker);
	int ret;

	ret = hid_add_device(uhid->hid);
	if (ret) {
		hid_err(uhid->hid, "Cannot register HID device: error %d\n", ret);

		/* We used to call hid_destroy_device() here, but that's really
		 * messy to get right because we have to coordinate with
		 * concurrent writes from userspace that might be in the middle
		 * of using uhid->hid.
		 * Just leave uhid->hid as-is for now, and clean it up when
		 * userspace tries to close or reinitialize the uhid instance.
		 *
		 * However, we do have to clear the ->running flag and do a
		 * wakeup to make sure userspace knows that the device is gone.
		 */
		uhid->running = false;
		wake_up_interruptible(&uhid->report_wait);
	}
}

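/* Queue @ev towards userspace; the caller must hold @qlock. Ownership of @ev
 * is always taken: it is either placed into @outq or freed if the ring is
 * full.
 */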
static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev)
{
	__u8 newhead;

	newhead = (uhid->head + 1) % UHID_BUFSIZE;

	if (newhead != uhid->tail) {
		uhid->outq[uhid->head] = ev;
		uhid->head = newhead;
		wake_up_interruptible(&uhid->waitq);
	} else {
		hid_warn(uhid->hid, "Output queue is full\n");
		kfree(ev);
	}
}

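/* Allocate and queue an empty event carrying only @event as its type. */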
static int uhid_queue_event(struct uhid_device *uhid, __u32 event)
{
	unsigned long flags;
	struct uhid_event *ev;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = event;

	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return 0;
}

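/* hid_ll_driver ->start(): tell userspace which report types use numbered
 * reports via the UHID_START dev_flags.
 */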
static int uhid_hid_start(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;
	struct uhid_event *ev;
	unsigned long flags;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_START;

	if (hid->report_enum[HID_FEATURE_REPORT].numbered)
		ev->u.start.dev_flags |= UHID_DEV_NUMBERED_FEATURE_REPORTS;
	if (hid->report_enum[HID_OUTPUT_REPORT].numbered)
		ev->u.start.dev_flags |= UHID_DEV_NUMBERED_OUTPUT_REPORTS;
	if (hid->report_enum[HID_INPUT_REPORT].numbered)
		ev->u.start.dev_flags |= UHID_DEV_NUMBERED_INPUT_REPORTS;

	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return 0;
}

static void uhid_hid_stop(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	hid->claimed = 0;
	uhid_queue_event(uhid, UHID_STOP);
}

static int uhid_hid_open(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	return uhid_queue_event(uhid, UHID_OPEN);
}

static void uhid_hid_close(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	uhid_queue_event(uhid, UHID_CLOSE);
}

static int uhid_hid_parse(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	return hid_parse_report(hid, uhid->rd_data, uhid->rd_size);
}

/*
 * Queue @ev towards userspace and wait for the matching GET/SET_REPORT reply,
 * giving up after a 5 second timeout. Always takes ownership of @ev.
 * Must be called with @report_lock held.
 */
static int __uhid_report_queue_and_wait(struct uhid_device *uhid,
					struct uhid_event *ev,
					__u32 *report_id)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&uhid->qlock, flags);
	*report_id = ++uhid->report_id;
	uhid->report_type = ev->type + 1;
	uhid->report_running = true;
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	ret = wait_event_interruptible_timeout(uhid->report_wait,
				!uhid->report_running || !uhid->running,
				5 * HZ);
	if (!ret || !uhid->running || uhid->report_running)
		ret = -EIO;
	else if (ret < 0)
		ret = -ERESTARTSYS;
	else
		ret = 0;

	uhid->report_running = false;

	return ret;
}

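/*
 * Called when userspace delivers a GET/SET_REPORT reply: if @id and @ev->type
 * match the currently pending request, stash the reply in @report_buf and wake
 * the waiter in __uhid_report_queue_and_wait(). Stale replies are dropped.
 */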
static void uhid_report_wake_up(struct uhid_device *uhid, u32 id,
				const struct uhid_event *ev)
{
	unsigned long flags;

	spin_lock_irqsave(&uhid->qlock, flags);

	/* id for old report; drop it silently */
	if (uhid->report_type != ev->type || uhid->report_id != id)
		goto unlock;
	if (!uhid->report_running)
		goto unlock;

	memcpy(&uhid->report_buf, ev, sizeof(*ev));
	uhid->report_running = false;
	wake_up_interruptible(&uhid->report_wait);

unlock:
	spin_unlock_irqrestore(&uhid->qlock, flags);
}

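/*
 * hid_ll_driver GET_REPORT path: forward the request to userspace as a
 * UHID_GET_REPORT event and copy the reply data back into @buf. Returns the
 * number of bytes copied or a negative error code.
 */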
static int uhid_hid_get_report(struct hid_device *hid, unsigned char rnum,
			       u8 *buf, size_t count, u8 rtype)
{
	struct uhid_device *uhid = hid->driver_data;
	struct uhid_get_report_reply_req *req;
	struct uhid_event *ev;
	int ret;

	if (!uhid->running)
		return -EIO;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_GET_REPORT;
	ev->u.get_report.rnum = rnum;
	ev->u.get_report.rtype = rtype;

	ret = mutex_lock_interruptible(&uhid->report_lock);
	if (ret) {
		kfree(ev);
		return ret;
	}

	/* this _always_ takes ownership of @ev */
	ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.get_report.id);
	if (ret)
		goto unlock;

	req = &uhid->report_buf.u.get_report_reply;
	if (req->err) {
		ret = -EIO;
	} else {
		ret = min3(count, (size_t)req->size, (size_t)UHID_DATA_MAX);
		memcpy(buf, req->data, ret);
	}

unlock:
	mutex_unlock(&uhid->report_lock);
	return ret;
}

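/*
 * hid_ll_driver SET_REPORT path: forward @buf to userspace as a
 * UHID_SET_REPORT event and wait for the acknowledgement. Returns @count on
 * success or a negative error code.
 */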
static int uhid_hid_set_report(struct hid_device *hid, unsigned char rnum,
			       const u8 *buf, size_t count, u8 rtype)
{
	struct uhid_device *uhid = hid->driver_data;
	struct uhid_event *ev;
	int ret;

	if (!uhid->running || count > UHID_DATA_MAX)
		return -EIO;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_SET_REPORT;
	ev->u.set_report.rnum = rnum;
	ev->u.set_report.rtype = rtype;
	ev->u.set_report.size = count;
	memcpy(ev->u.set_report.data, buf, count);

	ret = mutex_lock_interruptible(&uhid->report_lock);
	if (ret) {
		kfree(ev);
		return ret;
	}

	/* this _always_ takes ownership of @ev */
	ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.set_report.id);
	if (ret)
		goto unlock;

	if (uhid->report_buf.u.set_report_reply.err)
		ret = -EIO;
	else
		ret = count;

unlock:
	mutex_unlock(&uhid->report_lock);
	return ret;
}

static int uhid_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
				__u8 *buf, size_t len, unsigned char rtype,
				int reqtype)
{
	u8 u_rtype;

	switch (rtype) {
	case HID_FEATURE_REPORT:
		u_rtype = UHID_FEATURE_REPORT;
		break;
	case HID_OUTPUT_REPORT:
		u_rtype = UHID_OUTPUT_REPORT;
		break;
	case HID_INPUT_REPORT:
		u_rtype = UHID_INPUT_REPORT;
		break;
	default:
		return -EINVAL;
	}

	switch (reqtype) {
	case HID_REQ_GET_REPORT:
		return uhid_hid_get_report(hid, reportnum, buf, len, u_rtype);
	case HID_REQ_SET_REPORT:
		return uhid_hid_set_report(hid, reportnum, buf, len, u_rtype);
	default:
		return -EIO;
	}
}

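/*
 * Fire-and-forget output path: wrap @buf into a UHID_OUTPUT event and queue it
 * towards userspace without waiting for a reply.
 */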
static int uhid_hid_output_raw(struct hid_device *hid, __u8 *buf, size_t count,
			       unsigned char report_type)
{
	struct uhid_device *uhid = hid->driver_data;
	__u8 rtype;
	unsigned long flags;
	struct uhid_event *ev;

	switch (report_type) {
	case HID_FEATURE_REPORT:
		rtype = UHID_FEATURE_REPORT;
		break;
	case HID_OUTPUT_REPORT:
		rtype = UHID_OUTPUT_REPORT;
		break;
	default:
		return -EINVAL;
	}

	if (count < 1 || count > UHID_DATA_MAX)
		return -EINVAL;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_OUTPUT;
	ev->u.output.size = count;
	ev->u.output.rtype = rtype;
	memcpy(ev->u.output.data, buf, count);

	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return count;
}

static int uhid_hid_output_report(struct hid_device *hid, __u8 *buf,
				  size_t count)
{
	return uhid_hid_output_raw(hid, buf, count, HID_OUTPUT_REPORT);
}

struct hid_ll_driver uhid_hid_driver = {
	.start = uhid_hid_start,
	.stop = uhid_hid_stop,
	.open = uhid_hid_open,
	.close = uhid_hid_close,
	.parse = uhid_hid_parse,
	.raw_request = uhid_hid_raw_request,
	.output_report = uhid_hid_output_report,
};
EXPORT_SYMBOL_GPL(uhid_hid_driver);

#ifdef CONFIG_COMPAT

/* Apparently we haven't stepped on these rakes enough times yet. */
struct uhid_create_req_compat {
	__u8 name[128];
	__u8 phys[64];
	__u8 uniq[64];

	compat_uptr_t rd_data;
	__u16 rd_size;

	__u16 bus;
	__u32 vendor;
	__u32 product;
	__u32 version;
	__u32 country;
} __attribute__((__packed__));

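/*
 * Copy an event from userspace, translating the legacy UHID_CREATE layout from
 * 32-bit callers (compat_uptr_t report-descriptor pointer) into the native
 * struct uhid_event. All other event types share the same layout on 32-bit
 * and 64-bit and are copied verbatim.
 */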
static int uhid_event_from_user(const char __user *buffer, size_t len,
				struct uhid_event *event)
{
	if (in_compat_syscall()) {
		u32 type;

		if (get_user(type, buffer))
			return -EFAULT;

		if (type == UHID_CREATE) {
			/*
			 * This is our messed up request with compat pointer.
			 * It is largish (more than 256 bytes) so we better
			 * allocate it from the heap.
			 */
			struct uhid_create_req_compat *compat;

			compat = kzalloc(sizeof(*compat), GFP_KERNEL);
			if (!compat)
				return -ENOMEM;

			buffer += sizeof(type);
			len -= sizeof(type);
			if (copy_from_user(compat, buffer,
					   min(len, sizeof(*compat)))) {
				kfree(compat);
				return -EFAULT;
			}

			/* Shuffle the data over to proper structure */
			event->type = type;

			memcpy(event->u.create.name, compat->name,
				sizeof(compat->name));
			memcpy(event->u.create.phys, compat->phys,
				sizeof(compat->phys));
			memcpy(event->u.create.uniq, compat->uniq,
				sizeof(compat->uniq));

			event->u.create.rd_data = compat_ptr(compat->rd_data);
			event->u.create.rd_size = compat->rd_size;

			event->u.create.bus = compat->bus;
			event->u.create.vendor = compat->vendor;
			event->u.create.product = compat->product;
			event->u.create.version = compat->version;
			event->u.create.country = compat->country;

			kfree(compat);
			return 0;
		}
		/* All others can be copied directly */
	}

	if (copy_from_user(event, buffer, min(len, sizeof(*event))))
		return -EFAULT;

	return 0;
}
#else
static int uhid_event_from_user(const char __user *buffer, size_t len,
				struct uhid_event *event)
{
	if (copy_from_user(event, buffer, min(len, sizeof(*event))))
		return -EFAULT;

	return 0;
}
#endif

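/*
 * UHID_CREATE2 handler: duplicate the report descriptor, allocate and populate
 * the hid_device and schedule the worker that registers it with the HID core.
 * Called with @devlock held by uhid_char_write().
 */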
static int uhid_dev_create2(struct uhid_device *uhid,
			    const struct uhid_event *ev)
{
	struct hid_device *hid;
	size_t rd_size, len;
	void *rd_data;
	int ret;

	if (uhid->hid)
		return -EALREADY;

	rd_size = ev->u.create2.rd_size;
	if (rd_size <= 0 || rd_size > HID_MAX_DESCRIPTOR_SIZE)
		return -EINVAL;

	rd_data = kmemdup(ev->u.create2.rd_data, rd_size, GFP_KERNEL);
	if (!rd_data)
		return -ENOMEM;

	uhid->rd_size = rd_size;
	uhid->rd_data = rd_data;

	hid = hid_allocate_device();
	if (IS_ERR(hid)) {
		ret = PTR_ERR(hid);
		goto err_free;
	}

	/* @hid is zero-initialized, strncpy() is correct, strlcpy() not */
	len = min(sizeof(hid->name), sizeof(ev->u.create2.name)) - 1;
	strncpy(hid->name, ev->u.create2.name, len);
	len = min(sizeof(hid->phys), sizeof(ev->u.create2.phys)) - 1;
	strncpy(hid->phys, ev->u.create2.phys, len);
	len = min(sizeof(hid->uniq), sizeof(ev->u.create2.uniq)) - 1;
	strncpy(hid->uniq, ev->u.create2.uniq, len);

	hid->ll_driver = &uhid_hid_driver;
	hid->bus = ev->u.create2.bus;
	hid->vendor = ev->u.create2.vendor;
	hid->product = ev->u.create2.product;
	hid->version = ev->u.create2.version;
	hid->country = ev->u.create2.country;
	hid->driver_data = uhid;
	hid->dev.parent = uhid_misc.this_device;

	uhid->hid = hid;
	uhid->running = true;

	/* Adding of a HID device is done through a worker, to allow HID drivers
	 * which use feature requests during .probe to work; otherwise they
	 * would be blocked on devlock, which is held by uhid_char_write().
	 */
	schedule_work(&uhid->worker);

	return 0;

err_free:
	kfree(uhid->rd_data);
	uhid->rd_data = NULL;
	uhid->rd_size = 0;
	return ret;
}

static int uhid_dev_create(struct uhid_device *uhid,
			   struct uhid_event *ev)
{
	struct uhid_create_req orig;

	orig = ev->u.create;

	if (orig.rd_size <= 0 || orig.rd_size > HID_MAX_DESCRIPTOR_SIZE)
		return -EINVAL;
	if (copy_from_user(&ev->u.create2.rd_data, orig.rd_data, orig.rd_size))
		return -EFAULT;

	memcpy(ev->u.create2.name, orig.name, sizeof(orig.name));
	memcpy(ev->u.create2.phys, orig.phys, sizeof(orig.phys));
	memcpy(ev->u.create2.uniq, orig.uniq, sizeof(orig.uniq));
	ev->u.create2.rd_size = orig.rd_size;
	ev->u.create2.bus = orig.bus;
	ev->u.create2.vendor = orig.vendor;
	ev->u.create2.product = orig.product;
	ev->u.create2.version = orig.version;
	ev->u.create2.country = orig.country;

	return uhid_dev_create2(uhid, ev);
}

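/*
 * Tear the device down: mark the instance as not running, wake any waiter
 * blocked on a report reply, cancel the add-device worker and free the
 * hid_device and report descriptor. Called on UHID_DESTROY and when the
 * char device is released.
 */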
static int uhid_dev_destroy(struct uhid_device *uhid)
{
	if (!uhid->hid)
		return -EINVAL;

	uhid->running = false;
	wake_up_interruptible(&uhid->report_wait);

	cancel_work_sync(&uhid->worker);

	hid_destroy_device(uhid->hid);
	uhid->hid = NULL;
	kfree(uhid->rd_data);

	return 0;
}

static int uhid_dev_input(struct uhid_device *uhid, struct uhid_event *ev)
{
	if (!uhid->running)
		return -EINVAL;

	hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input.data,
			 min_t(size_t, ev->u.input.size, UHID_DATA_MAX), 0);

	return 0;
}

static int uhid_dev_input2(struct uhid_device *uhid, struct uhid_event *ev)
{
	if (!uhid->running)
		return -EINVAL;

	hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input2.data,
			 min_t(size_t, ev->u.input2.size, UHID_DATA_MAX), 0);

	return 0;
}

static int uhid_dev_get_report_reply(struct uhid_device *uhid,
				     struct uhid_event *ev)
{
	if (!uhid->running)
		return -EINVAL;

	uhid_report_wake_up(uhid, ev->u.get_report_reply.id, ev);
	return 0;
}

static int uhid_dev_set_report_reply(struct uhid_device *uhid,
				     struct uhid_event *ev)
{
	if (!uhid->running)
		return -EINVAL;

	uhid_report_wake_up(uhid, ev->u.set_report_reply.id, ev);
	return 0;
}

static int uhid_char_open(struct inode *inode, struct file *file)
{
	struct uhid_device *uhid;

	uhid = kzalloc(sizeof(*uhid), GFP_KERNEL);
	if (!uhid)
		return -ENOMEM;

	mutex_init(&uhid->devlock);
	mutex_init(&uhid->report_lock);
	spin_lock_init(&uhid->qlock);
	init_waitqueue_head(&uhid->waitq);
	init_waitqueue_head(&uhid->report_wait);
	uhid->running = false;
	INIT_WORK(&uhid->worker, uhid_device_add_worker);

	file->private_data = uhid;
	stream_open(inode, file);

	return 0;
}

static int uhid_char_release(struct inode *inode, struct file *file)
{
	struct uhid_device *uhid = file->private_data;
	unsigned int i;

	uhid_dev_destroy(uhid);

	for (i = 0; i < UHID_BUFSIZE; ++i)
		kfree(uhid->outq[i]);

	kfree(uhid);

	return 0;
}

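/*
 * Read one queued event. Blocks until an event is available unless O_NONBLOCK
 * is set; partial reads are allowed as long as at least the "type" member
 * fits into the user buffer.
 */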
static ssize_t uhid_char_read(struct file *file, char __user *buffer,
			      size_t count, loff_t *ppos)
{
	struct uhid_device *uhid = file->private_data;
	int ret;
	unsigned long flags;
	size_t len;

	/* they need at least the "type" member of uhid_event */
	if (count < sizeof(__u32))
		return -EINVAL;

try_again:
	if (file->f_flags & O_NONBLOCK) {
		if (uhid->head == uhid->tail)
			return -EAGAIN;
	} else {
		ret = wait_event_interruptible(uhid->waitq,
					       uhid->head != uhid->tail);
		if (ret)
			return ret;
	}

	ret = mutex_lock_interruptible(&uhid->devlock);
	if (ret)
		return ret;

	if (uhid->head == uhid->tail) {
		mutex_unlock(&uhid->devlock);
		goto try_again;
	} else {
		len = min(count, sizeof(**uhid->outq));
		if (copy_to_user(buffer, uhid->outq[uhid->tail], len)) {
			ret = -EFAULT;
		} else {
			kfree(uhid->outq[uhid->tail]);
			uhid->outq[uhid->tail] = NULL;

			spin_lock_irqsave(&uhid->qlock, flags);
			uhid->tail = (uhid->tail + 1) % UHID_BUFSIZE;
			spin_unlock_irqrestore(&uhid->qlock, flags);
		}
	}

	mutex_unlock(&uhid->devlock);
	return ret ? ret : len;
}

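/*
 * Handle one command from userspace. The request is copied into @input_buf
 * under @devlock and dispatched on its type; on success the full @count is
 * returned even if only part of the buffer was consumed.
 */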
static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
			       size_t count, loff_t *ppos)
{
	struct uhid_device *uhid = file->private_data;
	int ret;
	size_t len;

	/* we need at least the "type" member of uhid_event */
	if (count < sizeof(__u32))
		return -EINVAL;

	ret = mutex_lock_interruptible(&uhid->devlock);
	if (ret)
		return ret;

	memset(&uhid->input_buf, 0, sizeof(uhid->input_buf));
	len = min(count, sizeof(uhid->input_buf));

	ret = uhid_event_from_user(buffer, len, &uhid->input_buf);
	if (ret)
		goto unlock;

	switch (uhid->input_buf.type) {
	case UHID_CREATE:
		/*
		 * 'struct uhid_create_req' contains a __user pointer which is
		 * copied from, so it's unsafe to allow this with elevated
		 * privileges (e.g. from a setuid binary) or via kernel_write().
		 */
		if (file->f_cred != current_cred() || uaccess_kernel()) {
			pr_err_once("UHID_CREATE from different security context by process %d (%s), this is not allowed.\n",
				    task_tgid_vnr(current), current->comm);
			ret = -EACCES;
			goto unlock;
		}
		ret = uhid_dev_create(uhid, &uhid->input_buf);
		break;
	case UHID_CREATE2:
		ret = uhid_dev_create2(uhid, &uhid->input_buf);
		break;
	case UHID_DESTROY:
		ret = uhid_dev_destroy(uhid);
		break;
	case UHID_INPUT:
		ret = uhid_dev_input(uhid, &uhid->input_buf);
		break;
	case UHID_INPUT2:
		ret = uhid_dev_input2(uhid, &uhid->input_buf);
		break;
	case UHID_GET_REPORT_REPLY:
		ret = uhid_dev_get_report_reply(uhid, &uhid->input_buf);
		break;
	case UHID_SET_REPORT_REPLY:
		ret = uhid_dev_set_report_reply(uhid, &uhid->input_buf);
		break;
	default:
		ret = -EOPNOTSUPP;
	}

unlock:
	mutex_unlock(&uhid->devlock);

	/* return "count" not "len" to not confuse the caller */
	return ret ? ret : count;
}

static __poll_t uhid_char_poll(struct file *file, poll_table *wait)
{
	struct uhid_device *uhid = file->private_data;
	__poll_t mask = EPOLLOUT | EPOLLWRNORM; /* uhid is always writable */

	poll_wait(file, &uhid->waitq, wait);

	if (uhid->head != uhid->tail)
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}

static const struct file_operations uhid_fops = {
	.owner = THIS_MODULE,
	.open = uhid_char_open,
	.release = uhid_char_release,
	.read = uhid_char_read,
	.write = uhid_char_write,
	.poll = uhid_char_poll,
	.llseek = no_llseek,
};

static struct miscdevice uhid_misc = {
	.fops = &uhid_fops,
	.minor = UHID_MINOR,
	.name = UHID_NAME,
};
module_misc_device(uhid_misc);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem");
MODULE_ALIAS_MISCDEV(UHID_MINOR);
MODULE_ALIAS("devname:" UHID_NAME);