// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>

#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

static size_t iio_buffer_data_available(struct iio_buffer *buf)
{
	return buf->access->data_available(buf);
}

static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
				   struct iio_buffer *buf, size_t required)
{
	if (!indio_dev->info->hwfifo_flush_to_buffer)
		return -ENODEV;

	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
}

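/*
 * Check whether enough data is available for the reader to make progress.
 * @to_wait is the minimum number of datums to wait for; @to_flush, when
 * non-zero, is the number the caller would ultimately like available and
 * may be satisfied by flushing a device hardware FIFO into the buffer.
 */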
static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
			     size_t to_wait, int to_flush)
{
	size_t avail;
	int flushed = 0;

	/* wakeup if the device was unregistered */
	if (!indio_dev->info)
		return true;

	/* drain the buffer if it was disabled */
	if (!iio_buffer_is_active(buf)) {
		to_wait = min_t(size_t, to_wait, 1);
		to_flush = 0;
	}

	avail = iio_buffer_data_available(buf);

	if (avail >= to_wait) {
		/* force a flush for non-blocking reads */
		if (!to_wait && avail < to_flush)
			iio_buffer_flush_hwfifo(indio_dev, buf,
						to_flush - avail);
		return true;
	}

	if (to_flush)
		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
						  to_wait - avail);
	if (flushed <= 0)
		return false;

	if (avail + flushed >= to_wait)
		return true;

	return false;
}

/**
 * iio_buffer_read_outer() - chrdev read for buffer access
 * @filp: File structure pointer for the char device
 * @buf: Destination buffer for iio buffer read
 * @n: First n bytes to read
 * @f_ps: Long offset provided by the user as a seek position
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 *
 * Return: number of bytes read on success, 0 to signal end of file,
 *	   or a negative error code.
 **/
ssize_t iio_buffer_read_outer(struct file *filp, char __user *buf,
			      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	size_t datum_size;
	size_t to_wait;
	int ret = 0;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read)
		return -EINVAL;

	datum_size = rb->bytes_per_datum;

	/*
	 * If datum_size is 0 there will never be anything to read from the
	 * buffer, so signal end of file now.
	 */
	if (!datum_size)
		return 0;

	if (filp->f_flags & O_NONBLOCK)
		to_wait = 0;
	else
		to_wait = min_t(size_t, n / datum_size, rb->watermark);

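	/*
	 * Use the woken wake function so that a wakeup arriving between
	 * the readiness check and wait_woken() below is not lost.
	 */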
	add_wait_queue(&rb->pollq, &wait);
	do {
		if (!indio_dev->info) {
			ret = -ENODEV;
			break;
		}

		if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}

			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		ret = rb->access->read(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);
	remove_wait_queue(&rb->pollq, &wait);

	return ret;
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp: File structure pointer for device access
 * @wait: Poll table structure pointer for which the driver adds
 *	  a wait queue
 *
 * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
 *	   or 0 for other cases
 */
__poll_t iio_buffer_poll(struct file *filp,
			 struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info || rb == NULL)
		return 0;

	poll_wait(filp, &rb->pollq, wait);
	if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}

/**
 * iio_buffer_wakeup_poll() - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer = indio_dev->buffer;

	if (!buffer)
		return;

	wake_up(&buffer->pollq);
}

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
	if (!buffer->watermark)
		buffer->watermark = 1;
}
EXPORT_SYMBOL(iio_buffer_init);

/**
 * iio_buffer_set_attrs() - Set buffer-specific attributes
 * @buffer: The buffer for which we are setting attributes
 * @attrs: Pointer to a NULL-terminated list of pointers to attributes
 */
void iio_buffer_set_attrs(struct iio_buffer *buffer,
			  const struct attribute **attrs)
{
	buffer->attrs = attrs;
}
EXPORT_SYMBOL_GPL(iio_buffer_set_attrs);

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
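	/*
	 * The returned string has the form
	 * [be|le]:[s|u]bits/storagebits[Xrepeat][>>shift], e.g. "le:s12/16>>4"
	 * for a signed 12-bit value stored in 16 bits, shifted right by 4.
	 */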
	if (this_attr->c->scan_type.repeat > 1)
		return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
			       iio_endian_prefix[type],
			       this_attr->c->scan_type.sign,
			       this_attr->c->scan_type.realbits,
			       this_attr->c->scan_type.storagebits,
			       this_attr->c->scan_type.repeat,
			       this_attr->c->scan_type.shift);
	else
		return sprintf(buf, "%s:%c%d/%d>>%u\n",
			       iio_endian_prefix[type],
			       this_attr->c->scan_type.sign,
			       this_attr->c->scan_type.realbits,
			       this_attr->c->scan_type.storagebits,
			       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	/* Ensure ret is 0 or 1. */
	ret = !!test_bit(to_iio_dev_attr(attr)->address,
			 buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

/* Note NULL used as error indicator as it doesn't make sense. */
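/*
 * The available_scan_masks array is expected to be terminated by an
 * entry with no bits set, which is what the while (*av_masks) test
 * below relies on.
 */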
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask,
						bool strict)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (strict) {
			if (bitmap_equal(mask, av_masks, masklength))
				return av_masks;
		} else {
			if (bitmap_subset(mask, av_masks, masklength))
				return av_masks;
		}
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
static int iio_scan_mask_set(struct iio_dev *indio_dev,
			     struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask, false);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	bitmap_free(trialmask);

	return 0;

err_invalid_mask:
	bitmap_free(trialmask);
	return -EINVAL;
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static int iio_scan_mask_query(struct iio_dev *indio_dev,
			       struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	/* Ensure return value is 0 or 1. */
	return !!test_bit(bit, buffer->scan_mask);
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%d\n", buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					struct iio_buffer *buffer,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	return attrcount;
}

static ssize_t iio_buffer_read_length(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%d\n", buffer->length);
}

static ssize_t iio_buffer_write_length(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (val == buffer->length)
		return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
	} else {
		buffer->access->set_length(buffer, val);
		ret = 0;
	}
	if (ret)
		goto out;
	if (buffer->length && buffer->length < buffer->watermark)
		buffer->watermark = buffer->length;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static ssize_t iio_buffer_show_enable(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%d\n", iio_buffer_is_active(buffer));
}

static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
					     unsigned int scan_index)
{
	const struct iio_chan_spec *ch;
	unsigned int bytes;

	ch = iio_find_channel_from_si(indio_dev, scan_index);
	bytes = ch->scan_type.storagebits / 8;
	if (ch->scan_type.repeat > 1)
		bytes *= ch->scan_type.repeat;
	return bytes;
}

static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
{
	return iio_storage_bytes_for_si(indio_dev,
					indio_dev->scan_index_timestamp);
}

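/*
 * Compute the size of one scan in bytes. Each enabled channel is placed
 * at an offset aligned to its own storage size, and the total is padded
 * to the alignment of the largest element. For example, a u16 channel
 * followed by a u32 channel and an s64 timestamp lays out as bytes 0..1
 * (u16), padding, 4..7 (u32), 8..15 (timestamp): 16 bytes in total.
 */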
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	unsigned int bytes = 0;
	int length, i, largest = 0;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		length = iio_storage_bytes_for_si(indio_dev, i);
		bytes = ALIGN(bytes, length);
		bytes += length;
		largest = max(largest, length);
	}

	if (timestamp) {
		length = iio_storage_bytes_for_timestamp(indio_dev);
		bytes = ALIGN(bytes, length);
		bytes += length;
		largest = max(largest, length);
	}

	bytes = ALIGN(bytes, largest);
	return bytes;
}

static void iio_buffer_activate(struct iio_dev *indio_dev,
				struct iio_buffer *buffer)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &iio_dev_opaque->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	wake_up_interruptible(&buffer->pollq);
	iio_buffer_put(buffer);
}

static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer, *_buffer;

	list_for_each_entry_safe(buffer, _buffer,
				 &iio_dev_opaque->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);
}

static int iio_buffer_enable(struct iio_buffer *buffer,
			     struct iio_dev *indio_dev)
{
	if (!buffer->access->enable)
		return 0;
	return buffer->access->enable(buffer, indio_dev);
}

static int iio_buffer_disable(struct iio_buffer *buffer,
			      struct iio_dev *indio_dev)
{
	if (!buffer->access->disable)
		return 0;
	return buffer->access->disable(buffer, indio_dev);
}

static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
					      struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
				       buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}

static int iio_buffer_request_update(struct iio_dev *indio_dev,
				     struct iio_buffer *buffer)
{
	int ret;

	iio_buffer_update_bytes_per_datum(indio_dev, buffer);
	if (buffer->access->request_update) {
		ret = buffer->access->request_update(buffer);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer parameter update failed (%d)\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static void iio_free_scan_mask(struct iio_dev *indio_dev,
			       const unsigned long *mask)
{
	/* If the mask is dynamically allocated free it, otherwise do nothing */
	if (!indio_dev->available_scan_masks)
		bitmap_free(mask);
}

struct iio_device_config {
	unsigned int mode;
	unsigned int watermark;
	const unsigned long *scan_mask;
	unsigned int scan_bytes;
	bool scan_timestamp;
};

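/*
 * Work out the configuration (mode, watermark, combined scan mask and
 * scan size) that would result from inserting and/or removing the given
 * buffers, without applying it. Fills @config and returns 0 on success,
 * or a negative error code if the combination is not usable.
 */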
static int iio_verify_update(struct iio_dev *indio_dev,
	struct iio_buffer *insert_buffer, struct iio_buffer *remove_buffer,
	struct iio_device_config *config)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	unsigned long *compound_mask;
	const unsigned long *scan_mask;
	bool strict_scanmask = false;
	struct iio_buffer *buffer;
	bool scan_timestamp;
	unsigned int modes;

	if (insert_buffer &&
	    bitmap_empty(insert_buffer->scan_mask, indio_dev->masklength)) {
		dev_dbg(&indio_dev->dev,
			"At least one scan element must be enabled first\n");
		return -EINVAL;
	}

	memset(config, 0, sizeof(*config));
	config->watermark = ~0;

	/*
	 * If there is just one buffer and we are removing it there is nothing
	 * to verify.
	 */
	if (remove_buffer && !insert_buffer &&
	    list_is_singular(&iio_dev_opaque->buffer_list))
		return 0;

	modes = indio_dev->modes;

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		modes &= buffer->access->modes;
		config->watermark = min(config->watermark, buffer->watermark);
	}

	if (insert_buffer) {
		modes &= insert_buffer->access->modes;
		config->watermark = min(config->watermark,
					insert_buffer->watermark);
	}

	/* Definitely possible for devices to support both of these. */
	if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
		config->mode = INDIO_BUFFER_TRIGGERED;
	} else if (modes & INDIO_BUFFER_HARDWARE) {
		/*
		 * Keep things simple for now and only allow a single buffer to
		 * be connected in hardware mode.
		 */
		if (insert_buffer && !list_empty(&iio_dev_opaque->buffer_list))
			return -EINVAL;
		config->mode = INDIO_BUFFER_HARDWARE;
		strict_scanmask = true;
	} else if (modes & INDIO_BUFFER_SOFTWARE) {
		config->mode = INDIO_BUFFER_SOFTWARE;
	} else {
		/* Can only occur on first buffer */
		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
			dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
		return -EINVAL;
	}

	/* What scan mask do we actually have? */
	compound_mask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
	if (compound_mask == NULL)
		return -ENOMEM;

	scan_timestamp = false;

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		scan_timestamp |= buffer->scan_timestamp;
	}

	if (insert_buffer) {
		bitmap_or(compound_mask, compound_mask,
			  insert_buffer->scan_mask, indio_dev->masklength);
		scan_timestamp |= insert_buffer->scan_timestamp;
	}

	if (indio_dev->available_scan_masks) {
		scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
						indio_dev->masklength,
						compound_mask,
						strict_scanmask);
		bitmap_free(compound_mask);
		if (scan_mask == NULL)
			return -EINVAL;
	} else {
		scan_mask = compound_mask;
	}

	config->scan_bytes = iio_compute_scan_bytes(indio_dev,
						    scan_mask, scan_timestamp);
	config->scan_mask = scan_mask;
	config->scan_timestamp = scan_timestamp;

	return 0;
}

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned int from;
	unsigned int to;
	unsigned int length;
	struct list_head l;
};

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

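/*
 * Add a copy operation to the demux table, merging it into the previous
 * entry when the source and destination ranges are contiguous with it.
 */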
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) static int iio_buffer_add_demux(struct iio_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) unsigned int length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (*p && (*p)->from + (*p)->length == in_loc &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) (*p)->to + (*p)->length == out_loc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) (*p)->length += length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) *p = kmalloc(sizeof(**p), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (*p == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) (*p)->from = in_loc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) (*p)->to = out_loc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) (*p)->length = length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) list_add_tail(&(*p)->l, &buffer->demux_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) }
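
/*
 * A worked example of the coalescing above (offsets are illustrative):
 * if the previous table entry copies 2 bytes from offset 2 to offset 0,
 * a follow-up request to copy 2 bytes from offset 4 to offset 2 is
 * contiguous on both the source and destination side, so the entry is
 * simply grown to { .from = 2, .to = 0, .length = 4 } and one memcpy()
 * replaces two. A gap on either side starts a new table entry instead.
 */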

static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	int ret, in_ind = -1, out_ind, length;
	unsigned int in_loc = 0, out_loc = 0;
	struct iio_demux_table *p = NULL;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* If the masks match, the scan can be passed through untouched */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		/* Skip over channels the buffer did not request */
		while (in_ind != out_ind) {
			length = iio_storage_bytes_for_si(indio_dev, in_ind);
			/* Make sure we are aligned */
			in_loc = roundup(in_loc, length) + length;
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
		}
		length = iio_storage_bytes_for_si(indio_dev, in_ind);
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		length = iio_storage_bytes_for_timestamp(indio_dev);
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}
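
/*
 * Worked example (sizes are illustrative): with 2-byte channels 0..2
 * active on the device and the buffer requesting only channels 1 and 2,
 * the incoming scan is laid out as ch0@0, ch1@2, ch2@4 and the outgoing
 * one as ch1@0, ch2@2. The loop above generates (from 2, to 0, len 2)
 * and (from 4, to 2, len 2), which iio_buffer_add_demux() coalesces
 * into a single 4-byte copy. An 8-byte timestamp would then be moved
 * from offset 8 in the source (roundup of 6) to offset 8 in the bounce
 * buffer (roundup of 4).
 */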

static int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}

static int iio_enable_buffers(struct iio_dev *indio_dev,
			      struct iio_device_config *config)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int ret;

	indio_dev->active_scan_mask = config->scan_mask;
	indio_dev->scan_timestamp = config->scan_timestamp;
	indio_dev->scan_bytes = config->scan_bytes;
	indio_dev->currentmode = config->mode;

	iio_update_demux(indio_dev);

	/* Wind up again: preenable, scan mode, enable, trigger, postenable */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer preenable failed (%d)\n", ret);
			goto err_undo_config;
		}
	}

	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info->update_scan_mode(indio_dev,
						indio_dev->active_scan_mask);
		if (ret < 0) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: update scan mode failed (%d)\n",
				ret);
			goto err_run_postdisable;
		}
	}

	if (indio_dev->info->hwfifo_set_watermark)
		indio_dev->info->hwfifo_set_watermark(indio_dev,
			config->watermark);

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret = iio_buffer_enable(buffer, indio_dev);
		if (ret)
			goto err_disable_buffers;
	}

	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		ret = iio_trigger_attach_poll_func(indio_dev->trig,
						   indio_dev->pollfunc);
		if (ret)
			goto err_disable_buffers;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: postenable failed (%d)\n", ret);
			goto err_detach_pollfunc;
		}
	}

	return 0;

err_detach_pollfunc:
	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		iio_trigger_detach_poll_func(indio_dev->trig,
					     indio_dev->pollfunc);
	}
err_disable_buffers:
	list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list,
					     buffer_list)
		iio_buffer_disable(buffer, indio_dev);
err_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
err_undo_config:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
	indio_dev->active_scan_mask = NULL;

	return ret;
}
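
/*
 * A minimal driver-side sketch of the setup_ops consumed by the enable
 * and disable paths above. The foo_* names and hardware helpers are
 * hypothetical; only the callback signatures and the call order
 * (preenable -> update_scan_mode -> enable -> postenable, unwound in
 * reverse on failure) come from the code above.
 */
#if 0
static int foo_buffer_preenable(struct iio_dev *indio_dev)
{
	/* Put the hardware into a state where sampling can start */
	return foo_hw_prepare(iio_priv(indio_dev));
}

static int foo_buffer_postdisable(struct iio_dev *indio_dev)
{
	/* Undo whatever preenable did */
	return foo_hw_unprepare(iio_priv(indio_dev));
}

static const struct iio_buffer_setup_ops foo_buffer_setup_ops = {
	.preenable = foo_buffer_preenable,
	.postdisable = foo_buffer_postdisable,
};
#endif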

static int iio_disable_buffers(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int ret = 0;
	int ret2;

	/* Wind down existing buffers - if there are any */
	if (list_empty(&iio_dev_opaque->buffer_list))
		return 0;

	/*
	 * If things go wrong at some step in disable we still need to continue
	 * to perform the other steps, otherwise we leave the device in an
	 * inconsistent state. We return the error code for the first error we
	 * encountered.
	 */

	if (indio_dev->setup_ops->predisable) {
		ret2 = indio_dev->setup_ops->predisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		iio_trigger_detach_poll_func(indio_dev->trig,
					     indio_dev->pollfunc);
	}

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret2 = iio_buffer_disable(buffer, indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	if (indio_dev->setup_ops->postdisable) {
		ret2 = indio_dev->setup_ops->postdisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
	indio_dev->active_scan_mask = NULL;
	indio_dev->currentmode = INDIO_DIRECT_MODE;

	return ret;
}

static int __iio_update_buffers(struct iio_dev *indio_dev,
				struct iio_buffer *insert_buffer,
				struct iio_buffer *remove_buffer)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_device_config new_config;
	int ret;

	ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
				&new_config);
	if (ret)
		return ret;

	if (insert_buffer) {
		ret = iio_buffer_request_update(indio_dev, insert_buffer);
		if (ret)
			goto err_free_config;
	}

	ret = iio_disable_buffers(indio_dev);
	if (ret)
		goto err_deactivate_all;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&iio_dev_opaque->buffer_list))
		return 0;

	ret = iio_enable_buffers(indio_dev, &new_config);
	if (ret)
		goto err_deactivate_all;

	return 0;

err_deactivate_all:
	/*
	 * We've already verified that the config is valid earlier. If things go
	 * wrong in either enable or disable the most likely reason is an IO
	 * error from the device. In this case there is no good recovery
	 * strategy. Just make sure to disable everything and leave the device
	 * in a sane state. With a bit of luck the device might come back to
	 * life again later and userspace can try again.
	 */
	iio_buffer_deactivate_all(indio_dev);

err_free_config:
	iio_free_scan_mask(indio_dev, new_config.scan_mask);
	return ret;
}

/**
 * iio_update_buffers() - add and/or remove a buffer from the active list
 * @indio_dev: device the buffers belong to
 * @insert_buffer: buffer to make active, or NULL
 * @remove_buffer: buffer to deactivate, or NULL
 *
 * Tears the existing buffering down and builds it up again with the
 * updated set of buffers. Requests that are already satisfied (buffer
 * already active or already inactive) are silently ignored.
 */
int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
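
/*
 * Usage sketch for the export above, e.g. from an in-kernel consumer.
 * The foo_* wrappers are hypothetical; "buffer" is assumed to have been
 * attached to indio_dev beforehand.
 */
#if 0
static int foo_start_capture(struct iio_dev *indio_dev,
			     struct iio_buffer *buffer)
{
	/* Make "buffer" part of the active set */
	return iio_update_buffers(indio_dev, buffer, NULL);
}

static int foo_stop_capture(struct iio_dev *indio_dev,
			    struct iio_buffer *buffer)
{
	return iio_update_buffers(indio_dev, NULL, buffer);
}
#endif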

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	iio_disable_buffers(indio_dev);
	iio_buffer_deactivate_all(indio_dev);
}

static ssize_t iio_buffer_store_enable(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf,
				       size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	bool inlist;

	ret = kstrtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev, buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev, NULL, buffer);

done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
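
/*
 * The attribute above is what userspace toggles to start and stop
 * capture, typically (the device number is illustrative):
 *
 *   echo 1 > /sys/bus/iio/devices/iio:device0/buffer/enable
 *   cat /dev/iio:device0 > samples.bin
 *   echo 0 > /sys/bus/iio/devices/iio:device0/buffer/enable
 */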

static const char * const iio_scan_elements_group_name = "scan_elements";

static ssize_t iio_buffer_show_watermark(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sysfs_emit(buf, "%u\n", buffer->watermark);
}

static ssize_t iio_buffer_store_watermark(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;
	if (!val)
		return -EINVAL;

	mutex_lock(&indio_dev->mlock);

	if (val > buffer->length) {
		ret = -EINVAL;
		goto out;
	}

	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
		goto out;
	}

	buffer->watermark = val;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static ssize_t iio_dma_show_data_available(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sysfs_emit(buf, "%zu\n", iio_buffer_data_available(buffer));
}

static DEVICE_ATTR(length, 0644, iio_buffer_read_length,
		   iio_buffer_write_length);
static struct device_attribute dev_attr_length_ro = __ATTR(length,
	0444, iio_buffer_read_length, NULL);
static DEVICE_ATTR(enable, 0644,
		   iio_buffer_show_enable, iio_buffer_store_enable);
static DEVICE_ATTR(watermark, 0644,
		   iio_buffer_show_watermark, iio_buffer_store_watermark);
static struct device_attribute dev_attr_watermark_ro = __ATTR(watermark,
	0444, iio_buffer_show_watermark, NULL);
static DEVICE_ATTR(data_available, 0444,
		   iio_dma_show_data_available, NULL);

static struct attribute *iio_buffer_attrs[] = {
	&dev_attr_length.attr,
	&dev_attr_enable.attr,
	&dev_attr_watermark.attr,
	&dev_attr_data_available.attr,
};

static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
					     struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	int ret, i, attrn, attrcount;
	const struct iio_chan_spec *channels;

	attrcount = 0;
	if (buffer->attrs) {
		while (buffer->attrs[attrcount] != NULL)
			attrcount++;
	}

	attr = kcalloc(attrcount + ARRAY_SIZE(iio_buffer_attrs) + 1,
		       sizeof(struct attribute *), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
	if (!buffer->access->set_length)
		attr[0] = &dev_attr_length_ro.attr; /* length is entry 0 */

	if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK)
		attr[2] = &dev_attr_watermark_ro.attr; /* watermark is entry 2 */

	if (buffer->attrs)
		memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
		       sizeof(struct attribute *) * attrcount);

	attr[attrcount + ARRAY_SIZE(iio_buffer_attrs)] = NULL;

	buffer->buffer_group.name = "buffer";
	buffer->buffer_group.attrs = attr;

	indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;

	attrcount = 0;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	channels = indio_dev->channels;
	if (channels) {
		/* Build the scan element attributes for each channel */
		for (i = 0; i < indio_dev->num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = bitmap_zalloc(indio_dev->masklength,
							  GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	attrn = 0;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	bitmap_free(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
	kfree(buffer->buffer_group.attrs);

	return ret;
}

int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer = indio_dev->buffer;
	const struct iio_chan_spec *channels;
	int i;

	channels = indio_dev->channels;
	if (channels) {
		int ml = indio_dev->masklength;

		for (i = 0; i < indio_dev->num_channels; i++)
			ml = max(ml, channels[i].scan_index + 1);
		indio_dev->masklength = ml;
	}

	if (!buffer)
		return 0;

	return __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev);
}
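
/*
 * Example of the masklength computation above: for two channels with
 * scan_index 0 and 1 plus a timestamp channel at scan_index 2, the loop
 * yields max(0 + 1, 1 + 1, 2 + 1) = 3, i.e. a 3-bit scan mask.
 */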

static void __iio_buffer_free_sysfs_and_mask(struct iio_buffer *buffer)
{
	bitmap_free(buffer->scan_mask);
	kfree(buffer->buffer_group.attrs);
	kfree(buffer->scan_el_group.attrs);
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
}

void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer = indio_dev->buffer;

	if (!buffer)
		return;

	__iio_buffer_free_sysfs_and_mask(buffer);
}

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
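
/*
 * Sketch of how a driver wires the helper above in (the foo_ name is
 * hypothetical); the core then rejects any requested scan mask with
 * more than one bit set before buffering is enabled.
 */
#if 0
static const struct iio_info foo_info = {
	.validate_scan_mask = iio_validate_scan_mask_onehot,
	/* ... read_raw, write_raw, ... */
};
#endif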

static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);
	int ret;

	ret = buffer->access->store_to(buffer, dataout);
	if (ret)
		return ret;

	/*
	 * We can't just test for watermark to decide if we wake the poll queue
	 * because read may request fewer samples than the watermark.
	 */
	wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM);
	return 0;
}

/**
 * iio_push_to_buffers() - push to a registered buffer.
 * @indio_dev: iio_dev structure for device.
 * @data: Full scan: one sample for each enabled channel, with the
 *	timestamp (if enabled) last.
 */
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &iio_dev_opaque->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
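
/*
 * Typical producer-side sketch: a pollfunc bottom half reads one scan
 * from hardware and pushes it. The foo_* names and scan layout are
 * hypothetical; iio_push_to_buffers_with_timestamp() is the common
 * wrapper that stores the timestamp in the last, naturally aligned
 * 8 bytes of the scan before calling the export above.
 */
#if 0
static irqreturn_t foo_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct foo_state *st = iio_priv(indio_dev);

	foo_hw_read_scan(st, st->scan.channels);	/* fill the samples */
	iio_push_to_buffers_with_timestamp(indio_dev, &st->scan,
					   iio_get_time_ns(indio_dev));

	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}
#endif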

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);
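
/*
 * Reference-counting sketch for the pair above (struct foo_ctx is
 * hypothetical): every consumer that stores a buffer pointer takes its
 * own reference and drops it when done; the final put ends up in
 * iio_buffer_release().
 */
#if 0
static void foo_adopt_buffer(struct foo_ctx *ctx, struct iio_buffer *buffer)
{
	ctx->buffer = iio_buffer_get(buffer);	/* hold our own reference */
}

static void foo_drop_buffer(struct foo_ctx *ctx)
{
	iio_buffer_put(ctx->buffer);	/* last put calls ->release() */
}
#endif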

/**
 * iio_device_attach_buffer - Attach a buffer to an IIO device
 * @indio_dev: The device the buffer should be attached to
 * @buffer: The buffer to attach to the device
 *
 * This function attaches a buffer to an IIO device. The buffer stays attached
 * to the device until the device is freed. The function should only be called
 * at most once per device.
 */
void iio_device_attach_buffer(struct iio_dev *indio_dev,
			      struct iio_buffer *buffer)
{
	indio_dev->buffer = iio_buffer_get(buffer);
}
EXPORT_SYMBOL_GPL(iio_device_attach_buffer);
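
/*
 * Probe-time sketch using the kfifo backend from linux/iio/kfifo_buf.h
 * (the foo_ name is hypothetical). The allocation is typically released
 * with iio_kfifo_free() on remove, after which the device drops its own
 * reference when it is freed.
 */
#if 0
static int foo_attach_kfifo(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;

	buffer = iio_kfifo_allocate();
	if (!buffer)
		return -ENOMEM;

	iio_device_attach_buffer(indio_dev, buffer);
	indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
	return 0;
}
#endif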