// SPDX-License-Identifier: GPL-2.0-only
/*
 * cmt_speech.c - HSI CMT speech driver
 *
 * Copyright (C) 2008,2009,2010 Nokia Corporation. All rights reserved.
 *
 * Contact: Kai Vehmanen <kai.vehmanen@nokia.com>
 * Original author: Peter Ujfalusi <peter.ujfalusi@nokia.com>
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>
#include <linux/pm_qos.h>
#include <linux/hsi/hsi.h>
#include <linux/hsi/ssi_protocol.h>
#include <linux/hsi/cs-protocol.h>

#define CS_MMAP_SIZE	PAGE_SIZE

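/*
 * One queued event for the character device reader: a single 32-bit
 * command/notification word, delivered in FIFO order.
 */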
struct char_queue {
	struct list_head list;
	u32 msg;
};

struct cs_char {
	unsigned int opened;
	struct hsi_client *cl;
	struct cs_hsi_iface *hi;
	struct list_head chardev_queue;
	struct list_head dataind_queue;
	int dataind_pending;
	/* mmap things */
	unsigned long mmap_base;
	unsigned long mmap_size;
	spinlock_t lock;
	struct fasync_struct *async_queue;
	wait_queue_head_t wait;
	/* hsi channel ids */
	int channel_id_cmd;
	int channel_id_data;
};

#define SSI_CHANNEL_STATE_READING	1
#define SSI_CHANNEL_STATE_WRITING	(1 << 1)
#define SSI_CHANNEL_STATE_POLL		(1 << 2)
#define SSI_CHANNEL_STATE_ERROR		(1 << 3)

#define TARGET_MASK			0xf000000
#define TARGET_REMOTE			(1 << CS_DOMAIN_SHIFT)
#define TARGET_LOCAL			0

/* Number of pre-allocated command buffers */
#define CS_MAX_CMDS			4

/*
 * During data transfers, transactions must be handled
 * within 20ms (fixed value in cmtspeech HSI protocol)
 */
#define CS_QOS_LATENCY_FOR_DATA_USEC	20000

/* Timeout to wait for pending HSI transfers to complete */
#define CS_HSI_TRANSFER_TIMEOUT_MS	500

#define RX_PTR_BOUNDARY_SHIFT		8
#define RX_PTR_MAX_SHIFT		(RX_PTR_BOUNDARY_SHIFT + \
						CS_MAX_BUFFERS_SHIFT)
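
/*
 * With CS_MAX_BUFFERS_SHIFT from <linux/hsi/cs-protocol.h> (4 in the
 * mainline header), RX_PTR_MAX_SHIFT is 12, so the rolling rx pointer
 * range fits comfortably in an unsigned int; rx_ptr_shift_too_big()
 * below asserts this at build time.
 */
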
struct cs_hsi_iface {
	struct hsi_client *cl;
	struct hsi_client *master;

	unsigned int iface_state;
	unsigned int wakeline_state;
	unsigned int control_state;
	unsigned int data_state;

	/* state exposed to application */
	struct cs_mmap_config_block *mmap_cfg;

	unsigned long mmap_base;
	unsigned long mmap_size;

	unsigned int rx_slot;
	unsigned int tx_slot;

	/*
	 * Note: for security reasons, we do not trust the contents of
	 * mmap_cfg, but instead duplicate the variables here.
	 */
	unsigned int buf_size;
	unsigned int rx_bufs;
	unsigned int tx_bufs;
	unsigned int rx_ptr_boundary;
	unsigned int rx_offsets[CS_MAX_BUFFERS];
	unsigned int tx_offsets[CS_MAX_BUFFERS];

	/* size of aligned memory blocks */
	unsigned int slot_size;
	unsigned int flags;

	struct list_head cmdqueue;

	struct hsi_msg *data_rx_msg;
	struct hsi_msg *data_tx_msg;
	wait_queue_head_t datawait;

	struct pm_qos_request pm_qos_req;

	spinlock_t lock;
};

static struct cs_char cs_char_data;

static void cs_hsi_read_on_control(struct cs_hsi_iface *hi);
static void cs_hsi_read_on_data(struct cs_hsi_iface *hi);

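/*
 * Compile-time assertion: never called at runtime. BUILD_BUG_ON()
 * must live inside a function, so this empty inline hosts the check
 * that the rolling rx pointer range fits in an unsigned int.
 */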
static inline void rx_ptr_shift_too_big(void)
{
	BUILD_BUG_ON((1LLU << RX_PTR_MAX_SHIFT) > UINT_MAX);
}

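/*
 * Queue a one-word event for the reader and wake anyone blocked in
 * read()/poll(); also raise SIGIO for async notification. Events are
 * silently dropped if the device is not open or memory is tight
 * (GFP_ATOMIC, as this runs from HSI completion callbacks).
 */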
static void cs_notify(u32 message, struct list_head *head)
{
	struct char_queue *entry;

	spin_lock(&cs_char_data.lock);

	if (!cs_char_data.opened) {
		spin_unlock(&cs_char_data.lock);
		goto out;
	}

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		dev_err(&cs_char_data.cl->device,
			"Can't allocate new entry for the queue.\n");
		spin_unlock(&cs_char_data.lock);
		goto out;
	}

	entry->msg = message;
	list_add_tail(&entry->list, head);

	spin_unlock(&cs_char_data.lock);

	wake_up_interruptible(&cs_char_data.wait);
	kill_fasync(&cs_char_data.async_queue, SIGIO, POLL_IN);

out:
	return;
}

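/*
 * Dequeue the oldest event. The caller must hold cs_char_data.lock
 * and must have checked that the list is non-empty.
 */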
static u32 cs_pop_entry(struct list_head *head)
{
	struct char_queue *entry;
	u32 data;

	entry = list_entry(head->next, struct char_queue, list);
	data = entry->msg;
	list_del(&entry->list);
	kfree(entry);

	return data;
}

static void cs_notify_control(u32 message)
{
	cs_notify(message, &cs_char_data.chardev_queue);
}

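/*
 * Data notifications are bounded by the number of RX buffers
 * (maxlength): if the reader falls behind, the oldest entries are
 * dropped so that stale slot indications never accumulate.
 */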
static void cs_notify_data(u32 message, int maxlength)
{
	cs_notify(message, &cs_char_data.dataind_queue);

	spin_lock(&cs_char_data.lock);
	cs_char_data.dataind_pending++;
	while (cs_char_data.dataind_pending > maxlength &&
			!list_empty(&cs_char_data.dataind_queue)) {
		dev_dbg(&cs_char_data.cl->device,
			"data notification queue overrun (%u entries)\n",
			cs_char_data.dataind_pending);

		cs_pop_entry(&cs_char_data.dataind_queue);
		cs_char_data.dataind_pending--;
	}
	spin_unlock(&cs_char_data.lock);
}

static inline void cs_set_cmd(struct hsi_msg *msg, u32 cmd)
{
	u32 *data = sg_virt(msg->sgt.sgl);
	*data = cmd;
}

static inline u32 cs_get_cmd(struct hsi_msg *msg)
{
	u32 *data = sg_virt(msg->sgt.sgl);
	return *data;
}

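/*
 * Return a command message to the pre-allocated pool. The caller
 * must hold hi->lock.
 */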
static void cs_release_cmd(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;

	list_add_tail(&msg->link, &hi->cmdqueue);
}

static void cs_cmd_destructor(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;

	spin_lock(&hi->lock);

	dev_dbg(&cs_char_data.cl->device, "control cmd destructor\n");

	if (hi->iface_state != CS_STATE_CLOSED)
		dev_err(&hi->cl->device, "Cmd flushed while driver active\n");

	if (msg->ttype == HSI_MSG_READ)
		hi->control_state &=
			~(SSI_CHANNEL_STATE_POLL | SSI_CHANNEL_STATE_READING);
	else if (msg->ttype == HSI_MSG_WRITE &&
			hi->control_state & SSI_CHANNEL_STATE_WRITING)
		hi->control_state &= ~SSI_CHANNEL_STATE_WRITING;

	cs_release_cmd(msg);

	spin_unlock(&hi->lock);
}

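/*
 * Take a command message from the pool. The caller must hold
 * hi->lock. The pool is expected never to be empty here (CS_MAX_CMDS
 * bounds the commands in flight), hence the BUG_ON().
 */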
static struct hsi_msg *cs_claim_cmd(struct cs_hsi_iface *ssi)
{
	struct hsi_msg *msg;

	BUG_ON(list_empty(&ssi->cmdqueue));

	msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link);
	list_del(&msg->link);
	msg->destructor = cs_cmd_destructor;

	return msg;
}

static void cs_free_cmds(struct cs_hsi_iface *ssi)
{
	struct hsi_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) {
		list_del(&msg->link);
		msg->destructor = NULL;
		kfree(sg_virt(msg->sgt.sgl));
		hsi_free_msg(msg);
	}
}

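/*
 * Pre-allocate CS_MAX_CMDS single-word messages for the control
 * channel, so that commands can later be claimed under the spinlock
 * without risking an allocation failure in atomic context.
 */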
static int cs_alloc_cmds(struct cs_hsi_iface *hi)
{
	struct hsi_msg *msg;
	u32 *buf;
	unsigned int i;

	INIT_LIST_HEAD(&hi->cmdqueue);

	for (i = 0; i < CS_MAX_CMDS; i++) {
		msg = hsi_alloc_msg(1, GFP_KERNEL);
		if (!msg)
			goto out;
		buf = kmalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf) {
			hsi_free_msg(msg);
			goto out;
		}
		sg_init_one(msg->sgt.sgl, buf, sizeof(*buf));
		msg->channel = cs_char_data.channel_id_cmd;
		msg->context = hi;
		list_add_tail(&msg->link, &hi->cmdqueue);
	}

	return 0;

out:
	cs_free_cmds(hi);
	return -ENOMEM;
}

static void cs_hsi_data_destructor(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	const char *dir = (msg->ttype == HSI_MSG_READ) ? "RX" : "TX";

	dev_dbg(&cs_char_data.cl->device, "Freeing data %s message\n", dir);

	spin_lock(&hi->lock);
	if (hi->iface_state != CS_STATE_CLOSED)
		dev_err(&cs_char_data.cl->device,
			"Data %s flush while device active\n", dir);
	if (msg->ttype == HSI_MSG_READ)
		hi->data_state &=
			~(SSI_CHANNEL_STATE_POLL | SSI_CHANNEL_STATE_READING);
	else
		hi->data_state &= ~SSI_CHANNEL_STATE_WRITING;

	msg->status = HSI_STATUS_COMPLETED;
	if (unlikely(waitqueue_active(&hi->datawait)))
		wake_up_interruptible(&hi->datawait);

	spin_unlock(&hi->lock);
}

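/*
 * Allocate the two long-lived data-channel messages (one RX, one TX).
 * Their scatterlists are pointed at slots inside the shared mmap area
 * just before each transfer, so no payload buffers are allocated here.
 */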
static int cs_hsi_alloc_data(struct cs_hsi_iface *hi)
{
	struct hsi_msg *txmsg, *rxmsg;
	int res = 0;

	rxmsg = hsi_alloc_msg(1, GFP_KERNEL);
	if (!rxmsg) {
		res = -ENOMEM;
		goto out1;
	}
	rxmsg->channel = cs_char_data.channel_id_data;
	rxmsg->destructor = cs_hsi_data_destructor;
	rxmsg->context = hi;

	txmsg = hsi_alloc_msg(1, GFP_KERNEL);
	if (!txmsg) {
		res = -ENOMEM;
		goto out2;
	}
	txmsg->channel = cs_char_data.channel_id_data;
	txmsg->destructor = cs_hsi_data_destructor;
	txmsg->context = hi;

	hi->data_rx_msg = rxmsg;
	hi->data_tx_msg = txmsg;

	return 0;

out2:
	hsi_free_msg(rxmsg);
out1:
	return res;
}

static void cs_hsi_free_data_msg(struct hsi_msg *msg)
{
	WARN_ON(msg->status != HSI_STATUS_COMPLETED &&
		msg->status != HSI_STATUS_ERROR);
	hsi_free_msg(msg);
}

static void cs_hsi_free_data(struct cs_hsi_iface *hi)
{
	cs_hsi_free_data_msg(hi->data_rx_msg);
	cs_hsi_free_data_msg(hi->data_tx_msg);
}

static inline void __cs_hsi_error_pre(struct cs_hsi_iface *hi,
					struct hsi_msg *msg, const char *info,
					unsigned int *state)
{
	spin_lock(&hi->lock);
	dev_err(&hi->cl->device, "HSI %s error, msg %d, state %u\n",
		info, msg->status, *state);
}

static inline void __cs_hsi_error_post(struct cs_hsi_iface *hi)
{
	spin_unlock(&hi->lock);
}

static inline void __cs_hsi_error_read_bits(unsigned int *state)
{
	*state |= SSI_CHANNEL_STATE_ERROR;
	*state &= ~(SSI_CHANNEL_STATE_READING | SSI_CHANNEL_STATE_POLL);
}

static inline void __cs_hsi_error_write_bits(unsigned int *state)
{
	*state |= SSI_CHANNEL_STATE_ERROR;
	*state &= ~SSI_CHANNEL_STATE_WRITING;
}

static void cs_hsi_control_read_error(struct cs_hsi_iface *hi,
					struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "control read", &hi->control_state);
	cs_release_cmd(msg);
	__cs_hsi_error_read_bits(&hi->control_state);
	__cs_hsi_error_post(hi);
}

static void cs_hsi_control_write_error(struct cs_hsi_iface *hi,
					struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "control write", &hi->control_state);
	cs_release_cmd(msg);
	__cs_hsi_error_write_bits(&hi->control_state);
	__cs_hsi_error_post(hi);
}

static void cs_hsi_data_read_error(struct cs_hsi_iface *hi, struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "data read", &hi->data_state);
	__cs_hsi_error_read_bits(&hi->data_state);
	__cs_hsi_error_post(hi);
}

static void cs_hsi_data_write_error(struct cs_hsi_iface *hi,
					struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "data write", &hi->data_state);
	__cs_hsi_error_write_bits(&hi->data_state);
	__cs_hsi_error_post(hi);
}

static void cs_hsi_read_on_control_complete(struct hsi_msg *msg)
{
	u32 cmd = cs_get_cmd(msg);
	struct cs_hsi_iface *hi = msg->context;

	spin_lock(&hi->lock);
	hi->control_state &= ~SSI_CHANNEL_STATE_READING;
	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&hi->cl->device, "Control RX error detected\n");
		spin_unlock(&hi->lock);
		cs_hsi_control_read_error(hi, msg);
		goto out;
	}
	dev_dbg(&hi->cl->device, "Read on control: %08X\n", cmd);
	cs_release_cmd(msg);
	if (hi->flags & CS_FEAT_TSTAMP_RX_CTRL) {
		struct timespec64 tspec;
		struct cs_timestamp *tstamp =
			&hi->mmap_cfg->tstamp_rx_ctrl;

		ktime_get_ts64(&tspec);

		tstamp->tv_sec = (__u32) tspec.tv_sec;
		tstamp->tv_nsec = (__u32) tspec.tv_nsec;
	}
	spin_unlock(&hi->lock);

	cs_notify_control(cmd);

out:
	cs_hsi_read_on_control(hi);
}

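/*
 * Completion of the zero-length "peek" read: the controller has
 * signalled that a control word is available, so re-arm the same
 * message with a real one-word scatterlist to actually fetch it.
 */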
static void cs_hsi_peek_on_control_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	int ret;

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&hi->cl->device, "Control peek RX error detected\n");
		cs_hsi_control_read_error(hi, msg);
		return;
	}

	WARN_ON(!(hi->control_state & SSI_CHANNEL_STATE_READING));

	dev_dbg(&hi->cl->device, "Peek on control complete, reading\n");
	msg->sgt.nents = 1;
	msg->complete = cs_hsi_read_on_control_complete;
	ret = hsi_async_read(hi->cl, msg);
	if (ret)
		cs_hsi_control_read_error(hi, msg);
}

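/*
 * Keep a read armed on the control channel. The read is issued in two
 * stages: first a zero-nents "peek" that completes when data becomes
 * available, then the actual one-word read (see the peek completion
 * above). At most one control read is pending at any time.
 */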
static void cs_hsi_read_on_control(struct cs_hsi_iface *hi)
{
	struct hsi_msg *msg;
	int ret;

	spin_lock(&hi->lock);
	if (hi->control_state & SSI_CHANNEL_STATE_READING) {
		dev_err(&hi->cl->device, "Control read already pending (%d)\n",
			hi->control_state);
		spin_unlock(&hi->lock);
		return;
	}
	if (hi->control_state & SSI_CHANNEL_STATE_ERROR) {
		dev_err(&hi->cl->device, "Control read error (%d)\n",
			hi->control_state);
		spin_unlock(&hi->lock);
		return;
	}
	hi->control_state |= SSI_CHANNEL_STATE_READING;
	dev_dbg(&hi->cl->device, "Issuing RX on control\n");
	msg = cs_claim_cmd(hi);
	spin_unlock(&hi->lock);

	msg->sgt.nents = 0;
	msg->complete = cs_hsi_peek_on_control_complete;
	ret = hsi_async_read(hi->cl, msg);
	if (ret)
		cs_hsi_control_read_error(hi, msg);
}

static void cs_hsi_write_on_control_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;

	if (msg->status == HSI_STATUS_COMPLETED) {
		spin_lock(&hi->lock);
		hi->control_state &= ~SSI_CHANNEL_STATE_WRITING;
		cs_release_cmd(msg);
		spin_unlock(&hi->lock);
	} else if (msg->status == HSI_STATUS_ERROR) {
		cs_hsi_control_write_error(hi, msg);
	} else {
		dev_err(&hi->cl->device,
			"unexpected status in control write callback %d\n",
			msg->status);
	}
}

static int cs_hsi_write_on_control(struct cs_hsi_iface *hi, u32 message)
{
	struct hsi_msg *msg;
	int ret;

	spin_lock(&hi->lock);
	if (hi->control_state & SSI_CHANNEL_STATE_ERROR) {
		spin_unlock(&hi->lock);
		return -EIO;
	}
	if (hi->control_state & SSI_CHANNEL_STATE_WRITING) {
		dev_err(&hi->cl->device,
			"Write still pending on control channel.\n");
		spin_unlock(&hi->lock);
		return -EBUSY;
	}
	hi->control_state |= SSI_CHANNEL_STATE_WRITING;
	msg = cs_claim_cmd(hi);
	spin_unlock(&hi->lock);

	cs_set_cmd(msg, message);
	msg->sgt.nents = 1;
	msg->complete = cs_hsi_write_on_control_complete;
	dev_dbg(&hi->cl->device,
		"Sending control message %08X\n", message);
	ret = hsi_async_write(hi->cl, msg);
	if (ret) {
		dev_err(&hi->cl->device,
			"async_write failed with %d\n", ret);
		cs_hsi_control_write_error(hi, msg);
	}

	/*
	 * Make sure a control read is always pending when issuing
	 * new control writes. This is needed as the controller
	 * may flush our messages if e.g. the peer device reboots
	 * unexpectedly (and we cannot directly resubmit a new read from
	 * the message destructor; see cs_cmd_destructor()).
	 */
	if (!(hi->control_state & SSI_CHANNEL_STATE_READING)) {
		dev_err(&hi->cl->device, "Restarting control reads\n");
		cs_hsi_read_on_control(hi);
	}

	return 0;
}

static void cs_hsi_read_on_data_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	u32 payload;

	if (unlikely(msg->status == HSI_STATUS_ERROR)) {
		cs_hsi_data_read_error(hi, msg);
		return;
	}

	spin_lock(&hi->lock);
	WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_READING));
	hi->data_state &= ~SSI_CHANNEL_STATE_READING;
	payload = CS_RX_DATA_RECEIVED;
	payload |= hi->rx_slot;
	hi->rx_slot++;
	hi->rx_slot %= hi->rx_ptr_boundary;
	/* expose current rx ptr in mmap area */
	hi->mmap_cfg->rx_ptr = hi->rx_slot;
	if (unlikely(waitqueue_active(&hi->datawait)))
		wake_up_interruptible(&hi->datawait);
	spin_unlock(&hi->lock);

	cs_notify_data(payload, hi->rx_bufs);
	cs_hsi_read_on_data(hi);
}

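/*
 * Completion of the zero-length "peek" on the data channel: a frame
 * is arriving, so point the scatterlist at the next RX slot in the
 * mmap area and issue the actual read.
 */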
static void cs_hsi_peek_on_data_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	u32 *address;
	int ret;

	if (unlikely(msg->status == HSI_STATUS_ERROR)) {
		cs_hsi_data_read_error(hi, msg);
		return;
	}
	if (unlikely(hi->iface_state != CS_STATE_CONFIGURED)) {
		dev_err(&hi->cl->device, "Data received in invalid state\n");
		cs_hsi_data_read_error(hi, msg);
		return;
	}

	spin_lock(&hi->lock);
	WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_POLL));
	hi->data_state &= ~SSI_CHANNEL_STATE_POLL;
	hi->data_state |= SSI_CHANNEL_STATE_READING;
	spin_unlock(&hi->lock);

	address = (u32 *)(hi->mmap_base +
				hi->rx_offsets[hi->rx_slot % hi->rx_bufs]);
	sg_init_one(msg->sgt.sgl, address, hi->buf_size);
	msg->sgt.nents = 1;
	msg->complete = cs_hsi_read_on_data_complete;
	ret = hsi_async_read(hi->cl, msg);
	if (ret)
		cs_hsi_data_read_error(hi, msg);
}

/*
 * A read/write transaction is ongoing. Returns false when the channel
 * is idle or only a poll (SSI_CHANNEL_STATE_POLL) is pending.
 */
static inline int cs_state_xfer_active(unsigned int state)
{
	return (state & SSI_CHANNEL_STATE_WRITING) ||
		(state & SSI_CHANNEL_STATE_READING);
}

/*
 * No pending read/writes
 */
static inline int cs_state_idle(unsigned int state)
{
	return !(state & ~SSI_CHANNEL_STATE_ERROR);
}

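/*
 * Arm the data-channel read: mark the channel as polled and issue a
 * zero-nents "peek". The RX buffer is attached only once the peek
 * completes, in cs_hsi_peek_on_data_complete().
 */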
static void cs_hsi_read_on_data(struct cs_hsi_iface *hi)
{
	struct hsi_msg *rxmsg;
	int ret;

	spin_lock(&hi->lock);
	if (hi->data_state &
		(SSI_CHANNEL_STATE_READING | SSI_CHANNEL_STATE_POLL)) {
		dev_dbg(&hi->cl->device, "Data read already pending (%u)\n",
			hi->data_state);
		spin_unlock(&hi->lock);
		return;
	}
	hi->data_state |= SSI_CHANNEL_STATE_POLL;
	spin_unlock(&hi->lock);

	rxmsg = hi->data_rx_msg;
	sg_init_one(rxmsg->sgt.sgl, (void *)hi->mmap_base, 0);
	rxmsg->sgt.nents = 0;
	rxmsg->complete = cs_hsi_peek_on_data_complete;

	ret = hsi_async_read(hi->cl, rxmsg);
	if (ret)
		cs_hsi_data_read_error(hi, rxmsg);
}

static void cs_hsi_write_on_data_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;

	if (msg->status == HSI_STATUS_COMPLETED) {
		spin_lock(&hi->lock);
		hi->data_state &= ~SSI_CHANNEL_STATE_WRITING;
		if (unlikely(waitqueue_active(&hi->datawait)))
			wake_up_interruptible(&hi->datawait);
		spin_unlock(&hi->lock);
	} else {
		cs_hsi_data_write_error(hi, msg);
	}
}

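/*
 * Transmit one frame directly from TX slot 'slot' of the shared mmap
 * area. The slot index originates from user space (a CS_TX_DATA_READY
 * command; see cs_hsi_command() below).
 */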
static int cs_hsi_write_on_data(struct cs_hsi_iface *hi, unsigned int slot)
{
	u32 *address;
	struct hsi_msg *txmsg;
	int ret;

	spin_lock(&hi->lock);
	if (hi->iface_state != CS_STATE_CONFIGURED) {
		dev_err(&hi->cl->device, "Not configured, aborting\n");
		ret = -EINVAL;
		goto error;
	}
	if (hi->data_state & SSI_CHANNEL_STATE_ERROR) {
		dev_err(&hi->cl->device, "HSI error, aborting\n");
		ret = -EIO;
		goto error;
	}
	if (hi->data_state & SSI_CHANNEL_STATE_WRITING) {
		dev_err(&hi->cl->device, "Write pending on data channel.\n");
		ret = -EBUSY;
		goto error;
	}
	hi->data_state |= SSI_CHANNEL_STATE_WRITING;
	spin_unlock(&hi->lock);

	hi->tx_slot = slot;
	address = (u32 *)(hi->mmap_base + hi->tx_offsets[hi->tx_slot]);
	txmsg = hi->data_tx_msg;
	sg_init_one(txmsg->sgt.sgl, address, hi->buf_size);
	txmsg->complete = cs_hsi_write_on_data_complete;
	ret = hsi_async_write(hi->cl, txmsg);
	if (ret)
		cs_hsi_data_write_error(hi, txmsg);

	return ret;

error:
	spin_unlock(&hi->lock);
	if (ret == -EIO)
		cs_hsi_data_write_error(hi, hi->data_tx_msg);

	return ret;
}

static unsigned int cs_hsi_get_state(struct cs_hsi_iface *hi)
{
	return hi->iface_state;
}

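/*
 * Dispatch a single 32-bit command word from the character device.
 * The domain bits select the target: TARGET_REMOTE words are sent
 * verbatim on the control channel, while the local CS_TX_DATA_READY
 * command kicks a transmit of the TX slot given in the parameter
 * bits, e.g. (CS_TX_DATA_READY | 1) for slot 1.
 */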
static int cs_hsi_command(struct cs_hsi_iface *hi, u32 cmd)
{
	int ret = 0;

	local_bh_disable();
	switch (cmd & TARGET_MASK) {
	case TARGET_REMOTE:
		ret = cs_hsi_write_on_control(hi, cmd);
		break;
	case TARGET_LOCAL:
		if ((cmd & CS_CMD_MASK) == CS_TX_DATA_READY)
			ret = cs_hsi_write_on_data(hi, cmd & CS_PARAM_MASK);
		else
			ret = -EINVAL;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	local_bh_enable();

	return ret;
}

static void cs_hsi_set_wakeline(struct cs_hsi_iface *hi, bool new_state)
{
	int change = 0;

	spin_lock_bh(&hi->lock);
	if (hi->wakeline_state != new_state) {
		hi->wakeline_state = new_state;
		change = 1;
		dev_dbg(&hi->cl->device, "setting wake line to %d (%p)\n",
			new_state, hi->cl);
	}
	spin_unlock_bh(&hi->lock);

	if (change) {
		if (new_state)
			ssip_slave_start_tx(hi->master);
		else
			ssip_slave_stop_tx(hi->master);
	}

	dev_dbg(&hi->cl->device, "wake line set to %d (%p)\n",
		new_state, hi->cl);
}

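/*
 * With the rolling RX counter enabled, rx_ptr wraps at a multiple of
 * rx_bufs instead of at rx_bufs itself: e.g. rx_bufs == 4 gives a
 * boundary of 4 << 8 == 1024, so rx_ptr runs 0..1023 while the active
 * slot is always rx_ptr % rx_bufs. A missed wrap then shows up as a
 * large pointer difference rather than an aliased slot number.
 */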
static void set_buffer_sizes(struct cs_hsi_iface *hi, int rx_bufs, int tx_bufs)
{
	hi->rx_bufs = rx_bufs;
	hi->tx_bufs = tx_bufs;
	hi->mmap_cfg->rx_bufs = rx_bufs;
	hi->mmap_cfg->tx_bufs = tx_bufs;

	if (hi->flags & CS_FEAT_ROLLING_RX_COUNTER) {
		/*
		 * For more robust overrun detection, let the rx
		 * pointer run in range 0..'boundary-1'. Boundary
		 * is a multiple of rx_bufs, and limited in max size
		 * by RX_PTR_MAX_SHIFT to allow for fast ptr-diff
		 * calculation.
		 */
		hi->rx_ptr_boundary = (rx_bufs << RX_PTR_BOUNDARY_SHIFT);
		hi->mmap_cfg->rx_ptr_boundary = hi->rx_ptr_boundary;
	} else {
		hi->rx_ptr_boundary = hi->rx_bufs;
	}
}

static int check_buf_params(struct cs_hsi_iface *hi,
					const struct cs_buffer_config *buf_cfg)
{
	size_t buf_size_aligned = L1_CACHE_ALIGN(buf_cfg->buf_size) *
					(buf_cfg->rx_bufs + buf_cfg->tx_bufs);
	size_t ctrl_size_aligned = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg));
	int r = 0;

	if (buf_cfg->rx_bufs > CS_MAX_BUFFERS ||
			buf_cfg->tx_bufs > CS_MAX_BUFFERS) {
		r = -EINVAL;
	} else if ((buf_size_aligned + ctrl_size_aligned) >= hi->mmap_size) {
		dev_err(&hi->cl->device,
			"No space for the requested buffer configuration\n");
		r = -ENOBUFS;
	}

	return r;
}

/*
 * Block until pending data transfers have completed.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) static int cs_hsi_data_sync(struct cs_hsi_iface *hi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) int r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) spin_lock_bh(&hi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) if (!cs_state_xfer_active(hi->data_state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) dev_dbg(&hi->cl->device, "hsi_data_sync break, idle\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) int s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) DEFINE_WAIT(wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) if (!cs_state_xfer_active(hi->data_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) if (signal_pending(current)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) r = -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
		/*
		 * prepare_to_wait must be called with hi->lock held
		 * so that callbacks can check for waitqueue_active()
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) prepare_to_wait(&hi->datawait, &wait, TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) spin_unlock_bh(&hi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) s = schedule_timeout(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) msecs_to_jiffies(CS_HSI_TRANSFER_TIMEOUT_MS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) spin_lock_bh(&hi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) finish_wait(&hi->datawait, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) if (!s) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) dev_dbg(&hi->cl->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) "hsi_data_sync timeout after %d ms\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) CS_HSI_TRANSFER_TIMEOUT_MS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) r = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) spin_unlock_bh(&hi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) dev_dbg(&hi->cl->device, "hsi_data_sync done with res %d\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
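/*
 * Lay out the shared memory area: the config block sits at offset 0,
 * followed by rx_bufs RX (downlink) slots and tx_bufs TX (uplink)
 * slots, each rounded up to a full L1 cache line. The computed
 * offsets are mirrored into *hi->mmap_cfg so that user space can
 * locate the buffers through the mmap'ed page.
 */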
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) static void cs_hsi_data_enable(struct cs_hsi_iface *hi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) struct cs_buffer_config *buf_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) unsigned int data_start, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) BUG_ON(hi->buf_size == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) set_buffer_sizes(hi, buf_cfg->rx_bufs, buf_cfg->tx_bufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) hi->slot_size = L1_CACHE_ALIGN(hi->buf_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) dev_dbg(&hi->cl->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) "setting slot size to %u, buf size %u, align %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) hi->slot_size, hi->buf_size, L1_CACHE_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) data_start = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg));
	dev_dbg(&hi->cl->device,
		"setting data start at %u, cfg block %zu, align %u\n",
		data_start, sizeof(*hi->mmap_cfg), L1_CACHE_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) for (i = 0; i < hi->mmap_cfg->rx_bufs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) hi->rx_offsets[i] = data_start + i * hi->slot_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) hi->mmap_cfg->rx_offsets[i] = hi->rx_offsets[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) dev_dbg(&hi->cl->device, "DL buf #%u at %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) i, hi->rx_offsets[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) for (i = 0; i < hi->mmap_cfg->tx_bufs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) hi->tx_offsets[i] = data_start +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) (i + hi->mmap_cfg->rx_bufs) * hi->slot_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) hi->mmap_cfg->tx_offsets[i] = hi->tx_offsets[i];
		dev_dbg(&hi->cl->device, "UL buf #%u at %u\n",
				i, hi->tx_offsets[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) hi->iface_state = CS_STATE_CONFIGURED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) static void cs_hsi_data_disable(struct cs_hsi_iface *hi, int old_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) if (old_state == CS_STATE_CONFIGURED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) dev_dbg(&hi->cl->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) "closing data channel with slot size 0\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) hi->iface_state = CS_STATE_OPENED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
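/*
 * Apply a new buffer configuration. The sequence is: drop a
 * CONFIGURED interface back to OPENED so that no new transfers are
 * started, drain any in-flight transfers via cs_hsi_data_sync(),
 * then validate and install the new layout under hi->lock. A CPU
 * latency QoS request is held for as long as the interface stays
 * in the CONFIGURED state.
 */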
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) static int cs_hsi_buf_config(struct cs_hsi_iface *hi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) struct cs_buffer_config *buf_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) int r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) unsigned int old_state = hi->iface_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) spin_lock_bh(&hi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) /* Prevent new transactions during buffer reconfig */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (old_state == CS_STATE_CONFIGURED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) hi->iface_state = CS_STATE_OPENED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) spin_unlock_bh(&hi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
	/*
	 * Make sure that no data reads with a non-zero payload are
	 * ongoing before proceeding to change the buffer layout.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) r = cs_hsi_data_sync(hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) if (r < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) WARN_ON(cs_state_xfer_active(hi->data_state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) spin_lock_bh(&hi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) r = check_buf_params(hi, buf_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (r < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) hi->buf_size = buf_cfg->buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) hi->mmap_cfg->buf_size = hi->buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) hi->flags = buf_cfg->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) hi->rx_slot = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) hi->tx_slot = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) hi->slot_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (hi->buf_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) cs_hsi_data_enable(hi, buf_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) cs_hsi_data_disable(hi, old_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) spin_unlock_bh(&hi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) if (old_state != hi->iface_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (hi->iface_state == CS_STATE_CONFIGURED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) cpu_latency_qos_add_request(&hi->pm_qos_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) CS_QOS_LATENCY_FOR_DATA_USEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) local_bh_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) cs_hsi_read_on_data(hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) local_bh_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) } else if (old_state == CS_STATE_CONFIGURED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) cpu_latency_qos_remove_request(&hi->pm_qos_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) spin_unlock_bh(&hi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
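/*
 * Bring the HSI interface up: allocate command and data messages,
 * claim the HSI port and resolve the SSI master before starting to
 * listen on the control channel. The leaveN labels unwind the
 * steps in reverse order on failure.
 */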
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) static int cs_hsi_start(struct cs_hsi_iface **hi, struct hsi_client *cl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) unsigned long mmap_base, unsigned long mmap_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) struct cs_hsi_iface *hsi_if = kzalloc(sizeof(*hsi_if), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) dev_dbg(&cl->device, "cs_hsi_start\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if (!hsi_if) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) goto leave0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) spin_lock_init(&hsi_if->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) hsi_if->cl = cl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) hsi_if->iface_state = CS_STATE_CLOSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) hsi_if->mmap_cfg = (struct cs_mmap_config_block *)mmap_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) hsi_if->mmap_base = mmap_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) hsi_if->mmap_size = mmap_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) memset(hsi_if->mmap_cfg, 0, sizeof(*hsi_if->mmap_cfg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) init_waitqueue_head(&hsi_if->datawait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) err = cs_alloc_cmds(hsi_if);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) dev_err(&cl->device, "Unable to alloc HSI messages\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) goto leave1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) err = cs_hsi_alloc_data(hsi_if);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) dev_err(&cl->device, "Unable to alloc HSI messages for data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) goto leave2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) err = hsi_claim_port(cl, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) dev_err(&cl->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) "Could not open, HSI port already claimed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) goto leave3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) hsi_if->master = ssip_slave_get_master(cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) if (IS_ERR(hsi_if->master)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) err = PTR_ERR(hsi_if->master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) dev_err(&cl->device, "Could not get HSI master client\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) goto leave4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (!ssip_slave_running(hsi_if->master)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) dev_err(&cl->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) "HSI port not initialized\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) goto leave4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) hsi_if->iface_state = CS_STATE_OPENED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) local_bh_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) cs_hsi_read_on_control(hsi_if);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) local_bh_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) dev_dbg(&cl->device, "cs_hsi_start...done\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) BUG_ON(!hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) *hi = hsi_if;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) leave4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) hsi_release_port(cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) leave3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) cs_hsi_free_data(hsi_if);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) leave2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) cs_free_cmds(hsi_if);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) leave1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) kfree(hsi_if);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) leave0:
	dev_dbg(&cl->device, "cs_hsi_start...done/error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) static void cs_hsi_stop(struct cs_hsi_iface *hi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) dev_dbg(&hi->cl->device, "cs_hsi_stop\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) cs_hsi_set_wakeline(hi, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) ssip_slave_put_master(hi->master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) /* hsi_release_port() needs to be called with CS_STATE_CLOSED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) hi->iface_state = CS_STATE_CLOSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) hsi_release_port(hi->cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) * hsi_release_port() should flush out all the pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) * messages, so cs_state_idle() should be true for both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) * control and data channels.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) WARN_ON(!cs_state_idle(hi->control_state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) WARN_ON(!cs_state_idle(hi->data_state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) if (cpu_latency_qos_request_active(&hi->pm_qos_req))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) cpu_latency_qos_remove_request(&hi->pm_qos_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) spin_lock_bh(&hi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) cs_hsi_free_data(hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) cs_free_cmds(hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) spin_unlock_bh(&hi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) kfree(hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
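/*
 * Only a single page is ever exposed through mmap (enforced in
 * cs_char_mmap()), so every fault resolves to the page allocated
 * in cs_char_open().
 */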
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) static vm_fault_t cs_char_vma_fault(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) struct cs_char *csdata = vmf->vma->vm_private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) page = virt_to_page(csdata->mmap_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) get_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) vmf->page = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) static const struct vm_operations_struct cs_char_vm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) .fault = cs_char_vma_fault,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) static int cs_char_fasync(int fd, struct file *file, int on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) struct cs_char *csdata = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) if (fasync_helper(fd, file, on, &csdata->async_queue) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) static __poll_t cs_char_poll(struct file *file, poll_table *wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) struct cs_char *csdata = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) __poll_t ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
	poll_wait(file, &csdata->wait, wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) spin_lock_bh(&csdata->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) if (!list_empty(&csdata->chardev_queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) ret = EPOLLIN | EPOLLRDNORM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) else if (!list_empty(&csdata->dataind_queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) ret = EPOLLIN | EPOLLRDNORM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) spin_unlock_bh(&csdata->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
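/*
 * read() hands out one 32-bit event word per call. Control events
 * (chardev_queue) are drained before data notifications
 * (dataind_queue); with both queues empty the call blocks unless
 * O_NONBLOCK was given.
 */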
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) static ssize_t cs_char_read(struct file *file, char __user *buf, size_t count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) loff_t *unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) struct cs_char *csdata = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) u32 data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) ssize_t retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) if (count < sizeof(data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) DEFINE_WAIT(wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) spin_lock_bh(&csdata->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) if (!list_empty(&csdata->chardev_queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) data = cs_pop_entry(&csdata->chardev_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) } else if (!list_empty(&csdata->dataind_queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) data = cs_pop_entry(&csdata->dataind_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) csdata->dataind_pending--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) spin_unlock_bh(&csdata->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) if (data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) if (file->f_flags & O_NONBLOCK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) retval = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) } else if (signal_pending(current)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) retval = -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) prepare_to_wait_exclusive(&csdata->wait, &wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) finish_wait(&csdata->wait, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) retval = put_user(data, (u32 __user *)buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) if (!retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) retval = sizeof(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) static ssize_t cs_char_write(struct file *file, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) size_t count, loff_t *unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) struct cs_char *csdata = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) u32 data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) ssize_t retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) if (count < sizeof(data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
	if (get_user(data, (u32 __user *)buf))
		return -EFAULT;

	retval = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) err = cs_hsi_command(csdata->hi, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) retval = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
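/*
 * ioctl() interface: CS_GET_STATE and CS_GET_IF_VERSION copy status
 * back to user space, CS_SET_WAKELINE raises or lowers the HSI wake
 * line, and CS_CONFIG_BUFS installs a struct cs_buffer_config.
 */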
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) static long cs_char_ioctl(struct file *file, unsigned int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) struct cs_char *csdata = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) int r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) case CS_GET_STATE: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) unsigned int state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) state = cs_hsi_get_state(csdata->hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if (copy_to_user((void __user *)arg, &state, sizeof(state)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) case CS_SET_WAKELINE: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) unsigned int state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) if (copy_from_user(&state, (void __user *)arg, sizeof(state))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) if (state > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) cs_hsi_set_wakeline(csdata->hi, !!state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) case CS_GET_IF_VERSION: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) unsigned int ifver = CS_IF_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) if (copy_to_user((void __user *)arg, &ifver, sizeof(ifver)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) case CS_CONFIG_BUFS: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) struct cs_buffer_config buf_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (copy_from_user(&buf_cfg, (void __user *)arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) sizeof(buf_cfg)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) r = cs_hsi_buf_config(csdata->hi, &buf_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) r = -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) static int cs_char_mmap(struct file *file, struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) if (vma->vm_end < vma->vm_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (vma_pages(vma) != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
	vm_flags_set(vma, VM_IO | VM_DONTDUMP | VM_DONTEXPAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) vma->vm_ops = &cs_char_vm_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) vma->vm_private_data = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
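/*
 * Only one concurrent opener is allowed. The zeroed page allocated
 * here backs the mmap'ed config block and the speech data buffers
 * for the whole open-to-release lifetime.
 */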
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) static int cs_char_open(struct inode *unused, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) unsigned long p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) spin_lock_bh(&cs_char_data.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) if (cs_char_data.opened) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) spin_unlock_bh(&cs_char_data.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) goto out1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) cs_char_data.opened = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) cs_char_data.dataind_pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) spin_unlock_bh(&cs_char_data.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) p = get_zeroed_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) if (!p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) goto out2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) ret = cs_hsi_start(&cs_char_data.hi, cs_char_data.cl, p, CS_MMAP_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) dev_err(&cs_char_data.cl->device, "Unable to initialize HSI\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) goto out3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) /* these are only used in release so lock not needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) cs_char_data.mmap_base = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) cs_char_data.mmap_size = CS_MMAP_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) file->private_data = &cs_char_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) out3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) free_page(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) out2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) spin_lock_bh(&cs_char_data.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) cs_char_data.opened = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) spin_unlock_bh(&cs_char_data.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) out1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
static void cs_free_char_queue(struct list_head *head)
{
	struct char_queue *entry;
	struct list_head *cursor, *next;

	list_for_each_safe(cursor, next, head) {
		entry = list_entry(cursor, struct char_queue, list);
		list_del(&entry->list);
		kfree(entry);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) static int cs_char_release(struct inode *unused, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) struct cs_char *csdata = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) cs_hsi_stop(csdata->hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) spin_lock_bh(&csdata->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) csdata->hi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) free_page(csdata->mmap_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) cs_free_char_queue(&csdata->chardev_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) cs_free_char_queue(&csdata->dataind_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) csdata->opened = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) spin_unlock_bh(&csdata->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
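/*
 * Minimal user-space sketch of this ABI (illustrative only: no
 * error handling, 4 KiB page assumed, buffer sizes made up):
 *
 *	int fd = open("/dev/cmt_speech", O_RDWR);
 *	void *mem = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	struct cs_buffer_config cfg = {
 *		.rx_bufs = 2, .tx_bufs = 2, .buf_size = 320,
 *	};
 *	ioctl(fd, CS_CONFIG_BUFS, &cfg);
 *	uint32_t ev;
 *	read(fd, &ev, sizeof(ev));	// next event word
 */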
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) static const struct file_operations cs_char_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) .read = cs_char_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) .write = cs_char_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) .poll = cs_char_poll,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) .unlocked_ioctl = cs_char_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) .mmap = cs_char_mmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) .open = cs_char_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) .release = cs_char_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) .fasync = cs_char_fasync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) static struct miscdevice cs_char_miscdev = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) .minor = MISC_DYNAMIC_MINOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) .name = "cmt_speech",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) .fops = &cs_char_fops
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
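/*
 * Probe resolves the "speech-control" and "speech-data" channel ids
 * from the HSI client description and registers the misc device;
 * the HSI port itself is claimed lazily on first open().
 */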
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) static int cs_hsi_client_probe(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) struct hsi_client *cl = to_hsi_client(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) dev_dbg(dev, "hsi_client_probe\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) init_waitqueue_head(&cs_char_data.wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) spin_lock_init(&cs_char_data.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) cs_char_data.opened = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) cs_char_data.cl = cl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) cs_char_data.hi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) INIT_LIST_HEAD(&cs_char_data.chardev_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) INIT_LIST_HEAD(&cs_char_data.dataind_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) cs_char_data.channel_id_cmd = hsi_get_channel_id_by_name(cl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) "speech-control");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) if (cs_char_data.channel_id_cmd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) err = cs_char_data.channel_id_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) dev_err(dev, "Could not get cmd channel (%d)\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) cs_char_data.channel_id_data = hsi_get_channel_id_by_name(cl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) "speech-data");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) if (cs_char_data.channel_id_data < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) err = cs_char_data.channel_id_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) dev_err(dev, "Could not get data channel (%d)\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) err = misc_register(&cs_char_miscdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) dev_err(dev, "Failed to register: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) static int cs_hsi_client_remove(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) struct cs_hsi_iface *hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) dev_dbg(dev, "hsi_client_remove\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) misc_deregister(&cs_char_miscdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) spin_lock_bh(&cs_char_data.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) hi = cs_char_data.hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) cs_char_data.hi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) spin_unlock_bh(&cs_char_data.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) if (hi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) cs_hsi_stop(hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) static struct hsi_client_driver cs_hsi_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) .name = "cmt-speech",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) .probe = cs_hsi_client_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) .remove = cs_hsi_client_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) static int __init cs_char_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) pr_info("CMT speech driver added\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) return hsi_register_client_driver(&cs_hsi_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) module_init(cs_char_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) static void __exit cs_char_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) hsi_unregister_client_driver(&cs_hsi_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) pr_info("CMT speech driver removed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) module_exit(cs_char_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) MODULE_ALIAS("hsi:cmt-speech");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) MODULE_AUTHOR("Kai Vehmanen <kai.vehmanen@nokia.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@nokia.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) MODULE_DESCRIPTION("CMT speech driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) MODULE_LICENSE("GPL v2");