Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/* Industrial I/O event handling
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * Based on elements of hwmon and input subsystems.
 */

#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>

/**
 * struct iio_event_interface - chrdev interface for an event line
 * @wait:		wait queue to allow blocking reads of events
 * @det_events:		list of detected events
 * @dev_attr_list:	list of event interface sysfs attribute
 * @flags:		file operations related flags including busy flag.
 * @group:		event interface sysfs attribute group
 * @read_lock:		lock to protect kfifo read operations
 */
struct iio_event_interface {
	wait_queue_head_t	wait;
	DECLARE_KFIFO(det_events, struct iio_event_data, 16);

	struct list_head	dev_attr_list;
	unsigned long		flags;
	struct attribute_group	group;
	struct mutex		read_lock;
};

bool iio_event_enabled(const struct iio_event_interface *ev_int)
{
	return !!test_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
}

/**
 * iio_push_event() - try to add event to the list for userspace reading
 * @indio_dev:		IIO device structure
 * @ev_code:		What event
 * @timestamp:		When the event occurred
 *
 * Note: The caller must make sure that this function is not running
 * concurrently for the same indio_dev more than once.
 *
 * This function may be safely used as soon as a valid reference to iio_dev has
 * been obtained via iio_device_alloc(), but any events that are submitted
 * before iio_device_register() has successfully completed will be silently
 * discarded.
 **/
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
	struct iio_event_data ev;
	int copied;

	if (!ev_int)
		return 0;

	/* Does anyone care? */
	if (iio_event_enabled(ev_int)) {

		ev.id = ev_code;
		ev.timestamp = timestamp;

		copied = kfifo_put(&ev_int->det_events, ev);
		if (copied != 0)
			wake_up_poll(&ev_int->wait, EPOLLIN);
	}

	return 0;
}
EXPORT_SYMBOL(iio_push_event);
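
/*
 * Driver side (an illustrative sketch, not taken from a specific driver):
 * a device driver typically calls iio_push_event() from its (threaded)
 * interrupt handler once the hardware has signalled, say, a rising
 * threshold crossing. The channel type and number below are arbitrary
 * placeholders; IIO_UNMOD_EVENT_CODE() and iio_get_time_ns() are the
 * standard helpers for building the event code and timestamp.
 *
 *	iio_push_event(indio_dev,
 *		       IIO_UNMOD_EVENT_CODE(IIO_ACCEL, 0,
 *					    IIO_EV_TYPE_THRESH,
 *					    IIO_EV_DIR_RISING),
 *		       iio_get_time_ns(indio_dev));
 */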

/**
 * iio_event_poll() - poll the event queue to find out if it has data
 * @filep:	File structure pointer to identify the device
 * @wait:	Poll table pointer to add the wait queue on
 *
 * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
 *	   or a negative error code on failure
 */
static __poll_t iio_event_poll(struct file *filep,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
	__poll_t events = 0;

	if (!indio_dev->info)
		return events;

	poll_wait(filep, &ev_int->wait, wait);

	if (!kfifo_is_empty(&ev_int->det_events))
		events = EPOLLIN | EPOLLRDNORM;

	return events;
}

static ssize_t iio_event_chrdev_read(struct file *filep,
				     char __user *buf,
				     size_t count,
				     loff_t *f_ps)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
	unsigned int copied;
	int ret;

	if (!indio_dev->info)
		return -ENODEV;

	if (count < sizeof(struct iio_event_data))
		return -EINVAL;

	do {
		if (kfifo_is_empty(&ev_int->det_events)) {
			if (filep->f_flags & O_NONBLOCK)
				return -EAGAIN;

			ret = wait_event_interruptible(ev_int->wait,
					!kfifo_is_empty(&ev_int->det_events) ||
					indio_dev->info == NULL);
			if (ret)
				return ret;
			if (indio_dev->info == NULL)
				return -ENODEV;
		}

		if (mutex_lock_interruptible(&ev_int->read_lock))
			return -ERESTARTSYS;
		ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
		mutex_unlock(&ev_int->read_lock);

		if (ret)
			return ret;

		/*
		 * If we couldn't read anything from the fifo (a different
		 * thread might have been faster), return -EAGAIN if the file
		 * descriptor is non-blocking; otherwise go back to sleep and
		 * wait for more data to arrive.
		 */
		if (copied == 0 && (filep->f_flags & O_NONBLOCK))
			return -EAGAIN;

	} while (copied == 0);

	return copied;
}

static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;

	clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);

	iio_device_put(indio_dev);

	return 0;
}

static const struct file_operations iio_event_chrdev_fileops = {
	.read =  iio_event_chrdev_read,
	.poll =  iio_event_poll,
	.release = iio_event_chrdev_release,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

int iio_event_getfd(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
	int fd;

	if (ev_int == NULL)
		return -ENODEV;

	fd = mutex_lock_interruptible(&indio_dev->mlock);
	if (fd)
		return fd;

	if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
		fd = -EBUSY;
		goto unlock;
	}

	iio_device_get(indio_dev);

	fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
				indio_dev, O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
		iio_device_put(indio_dev);
	} else {
		kfifo_reset_out(&ev_int->det_events);
	}

unlock:
	mutex_unlock(&indio_dev->mlock);
	return fd;
}
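
/*
 * Userspace side (an illustrative sketch, not part of the original file):
 * the descriptor returned by iio_event_getfd() is normally handed out via
 * the IIO_GET_EVENT_FD_IOCTL ioctl on the /dev/iio:deviceX character
 * device, and each read() on it yields one struct iio_event_data. Here
 * dev_fd is assumed to be the open /dev/iio:deviceX descriptor and
 * decode() is a hypothetical helper; the IIO_EVENT_CODE_EXTRACT_*() macros
 * from the uapi <linux/iio/events.h> header pull the type, direction and
 * channel information out of ev.id.
 *
 *	int event_fd;
 *	struct iio_event_data ev;
 *
 *	if (ioctl(dev_fd, IIO_GET_EVENT_FD_IOCTL, &event_fd) == 0 &&
 *	    read(event_fd, &ev, sizeof(ev)) == sizeof(ev))
 *		decode(ev.id, ev.timestamp);
 */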

static const char * const iio_ev_type_text[] = {
	[IIO_EV_TYPE_THRESH] = "thresh",
	[IIO_EV_TYPE_MAG] = "mag",
	[IIO_EV_TYPE_ROC] = "roc",
	[IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
	[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
	[IIO_EV_TYPE_CHANGE] = "change",
#ifdef CONFIG_NO_GKI
	[IIO_EV_TYPE_FIFO_FLUSH] = "fifo_flush",
#endif
};

static const char * const iio_ev_dir_text[] = {
	[IIO_EV_DIR_EITHER] = "either",
	[IIO_EV_DIR_RISING] = "rising",
	[IIO_EV_DIR_FALLING] = "falling",
#ifdef CONFIG_NO_GKI
	[IIO_EV_DIR_FIFO_EMPTY] = "empty",
	[IIO_EV_DIR_FIFO_DATA] = "data",
#endif
};

static const char * const iio_ev_info_text[] = {
	[IIO_EV_INFO_ENABLE] = "en",
	[IIO_EV_INFO_VALUE] = "value",
	[IIO_EV_INFO_HYSTERESIS] = "hysteresis",
	[IIO_EV_INFO_PERIOD] = "period",
	[IIO_EV_INFO_HIGH_PASS_FILTER_3DB] = "high_pass_filter_3db",
	[IIO_EV_INFO_LOW_PASS_FILTER_3DB] = "low_pass_filter_3db",
};

static enum iio_event_direction iio_ev_attr_dir(struct iio_dev_attr *attr)
{
	return attr->c->event_spec[attr->address & 0xffff].dir;
}

static enum iio_event_type iio_ev_attr_type(struct iio_dev_attr *attr)
{
	return attr->c->event_spec[attr->address & 0xffff].type;
}

static enum iio_event_info iio_ev_attr_info(struct iio_dev_attr *attr)
{
	return (attr->address >> 16) & 0xffff;
}

static ssize_t iio_ev_state_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret;
	bool val;

	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	ret = indio_dev->info->write_event_config(indio_dev,
		this_attr->c, iio_ev_attr_type(this_attr),
		iio_ev_attr_dir(this_attr), val);

	return (ret < 0) ? ret : len;
}

static ssize_t iio_ev_state_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val;

	val = indio_dev->info->read_event_config(indio_dev,
		this_attr->c, iio_ev_attr_type(this_attr),
		iio_ev_attr_dir(this_attr));
	if (val < 0)
		return val;
	else
		return sprintf(buf, "%d\n", val);
}

static ssize_t iio_ev_value_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, val2, val_arr[2];
	int ret;

	ret = indio_dev->info->read_event_value(indio_dev,
		this_attr->c, iio_ev_attr_type(this_attr),
		iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
		&val, &val2);
	if (ret < 0)
		return ret;
	val_arr[0] = val;
	val_arr[1] = val2;
	return iio_format_value(buf, ret, 2, val_arr);
}

static ssize_t iio_ev_value_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, val2;
	int ret;

	if (!indio_dev->info->write_event_value)
		return -EINVAL;

	ret = iio_str_to_fixpoint(buf, 100000, &val, &val2);
	if (ret)
		return ret;
	ret = indio_dev->info->write_event_value(indio_dev,
		this_attr->c, iio_ev_attr_type(this_attr),
		iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
		val, val2);
	if (ret < 0)
		return ret;

	return len;
}
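
/*
 * Worked example (illustrative, based on the fract_mult of 100000 passed to
 * iio_str_to_fixpoint() above): writing the string "0.5" to an event value
 * attribute should be parsed into val = 0 and val2 = 500000, i.e. the usual
 * IIO integer-plus-micro pair that the driver's write_event_value()
 * callback receives.
 */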

static int iio_device_add_event(struct iio_dev *indio_dev,
	const struct iio_chan_spec *chan, unsigned int spec_index,
	enum iio_event_type type, enum iio_event_direction dir,
	enum iio_shared_by shared_by, const unsigned long *mask)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	ssize_t (*show)(struct device *, struct device_attribute *, char *);
	ssize_t (*store)(struct device *, struct device_attribute *,
		const char *, size_t);
	unsigned int attrcount = 0;
	unsigned int i;
	char *postfix;
	int ret;

	for_each_set_bit(i, mask, sizeof(*mask)*8) {
		if (i >= ARRAY_SIZE(iio_ev_info_text))
			return -EINVAL;
		if (dir != IIO_EV_DIR_NONE)
			postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
					iio_ev_type_text[type],
					iio_ev_dir_text[dir],
					iio_ev_info_text[i]);
		else
			postfix = kasprintf(GFP_KERNEL, "%s_%s",
					iio_ev_type_text[type],
					iio_ev_info_text[i]);
		if (postfix == NULL)
			return -ENOMEM;

		if (i == IIO_EV_INFO_ENABLE) {
			show = iio_ev_state_show;
			store = iio_ev_state_store;
		} else {
			show = iio_ev_value_show;
			store = iio_ev_value_store;
		}

		ret = __iio_add_chan_devattr(postfix, chan, show, store,
			 (i << 16) | spec_index, shared_by, &indio_dev->dev,
			&iio_dev_opaque->event_interface->dev_attr_list);
		kfree(postfix);

		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
			continue;

		if (ret)
			return ret;

		attrcount++;
	}

	return attrcount;
}
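
/*
 * Naming example (illustrative): for a separate rising threshold event on a
 * hypothetical voltage channel, the postfixes built above ("thresh_rising_en",
 * "thresh_rising_value") end up, with the channel prefix added by
 * __iio_add_chan_devattr(), as sysfs attributes such as
 * in_voltage0_thresh_rising_en and in_voltage0_thresh_rising_value in the
 * device's "events" group, e.g. /sys/bus/iio/devices/iio:device0/events/.
 */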

static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
	struct iio_chan_spec const *chan)
{
	int ret = 0, i, attrcount = 0;
	enum iio_event_direction dir;
	enum iio_event_type type;

	for (i = 0; i < chan->num_event_specs; i++) {
		type = chan->event_spec[i].type;
		dir = chan->event_spec[i].dir;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SEPARATE, &chan->event_spec[i].mask_separate);
		if (ret < 0)
			return ret;
		attrcount += ret;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SHARED_BY_TYPE,
			&chan->event_spec[i].mask_shared_by_type);
		if (ret < 0)
			return ret;
		attrcount += ret;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SHARED_BY_DIR,
			&chan->event_spec[i].mask_shared_by_dir);
		if (ret < 0)
			return ret;
		attrcount += ret;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SHARED_BY_ALL,
			&chan->event_spec[i].mask_shared_by_all);
		if (ret < 0)
			return ret;
		attrcount += ret;
	}
	ret = attrcount;
	return ret;
}

static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
{
	int j, ret, attrcount = 0;

	/* Dynamically created from the channels array */
	for (j = 0; j < indio_dev->num_channels; j++) {
		ret = iio_device_add_event_sysfs(indio_dev,
						 &indio_dev->channels[j]);
		if (ret < 0)
			return ret;
		attrcount += ret;
	}
	return attrcount;
}

static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
{
	int j;

	for (j = 0; j < indio_dev->num_channels; j++) {
		if (indio_dev->channels[j].num_event_specs != 0)
			return true;
	}
	return false;
}

static void iio_setup_ev_int(struct iio_event_interface *ev_int)
{
	INIT_KFIFO(ev_int->det_events);
	init_waitqueue_head(&ev_int->wait);
	mutex_init(&ev_int->read_lock);
}

static const char *iio_event_group_name = "events";
int iio_device_register_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int;
	struct iio_dev_attr *p;
	int ret = 0, attrcount_orig = 0, attrcount, attrn;
	struct attribute **attr;

	if (!(indio_dev->info->event_attrs ||
	      iio_check_for_dynamic_events(indio_dev)))
		return 0;

	ev_int = kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
	if (ev_int == NULL)
		return -ENOMEM;

	iio_dev_opaque->event_interface = ev_int;

	INIT_LIST_HEAD(&ev_int->dev_attr_list);

	iio_setup_ev_int(ev_int);
	if (indio_dev->info->event_attrs != NULL) {
		attr = indio_dev->info->event_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	if (indio_dev->channels) {
		ret = __iio_add_event_config_attrs(indio_dev);
		if (ret < 0)
			goto error_free_setup_event_lines;
		attrcount += ret;
	}

	ev_int->group.name = iio_event_group_name;
	ev_int->group.attrs = kcalloc(attrcount + 1,
				      sizeof(ev_int->group.attrs[0]),
				      GFP_KERNEL);
	if (ev_int->group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_setup_event_lines;
	}
	if (indio_dev->info->event_attrs)
		memcpy(ev_int->group.attrs,
		       indio_dev->info->event_attrs->attrs,
		       sizeof(ev_int->group.attrs[0]) * attrcount_orig);
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p, &ev_int->dev_attr_list, l)
		ev_int->group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &ev_int->group;

	return 0;

error_free_setup_event_lines:
	iio_free_chan_devattr_list(&ev_int->dev_attr_list);
	kfree(ev_int);
	iio_dev_opaque->event_interface = NULL;
	return ret;
}

/**
 * iio_device_wakeup_eventset - Wakes up the event waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll() and blocking read().
 * Should usually be called when the device is unregistered.
 */
void iio_device_wakeup_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	if (iio_dev_opaque->event_interface == NULL)
		return;
	wake_up(&iio_dev_opaque->event_interface->wait);
}

void iio_device_unregister_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;

	if (ev_int == NULL)
		return;
	iio_free_chan_devattr_list(&ev_int->dev_attr_list);
	kfree(ev_int->group.attrs);
	kfree(ev_int);
	iio_dev_opaque->event_interface = NULL;
}