// SPDX-License-Identifier: GPL-2.0-only
/*
 * ISHTP client logic
 *
 * Copyright (c) 2003-2016, Intel Corporation.
 */

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "hbm.h"
#include "client.h"

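/**
 * ishtp_cl_get_tx_free_buffer_size() - Get free bytes in TX ring
 * @cl: ishtp client instance
 *
 * Compute the free space in the TX ring as the number of free ring
 * entries times the firmware client's maximum message length.
 *
 * Return: number of free bytes in the TX ring
 */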
int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl)
{
	unsigned long tx_free_flags;
	int size;

	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
	size = cl->tx_ring_free_size * cl->device->fw_client->props.max_msg_length;
	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);

	return size;
}
EXPORT_SYMBOL(ishtp_cl_get_tx_free_buffer_size);

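/**
 * ishtp_cl_get_tx_free_rings() - Get number of free TX ring entries
 * @cl: ishtp client instance
 *
 * Return: number of free entries in the TX ring
 */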
int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl)
{
	return cl->tx_ring_free_size;
}
EXPORT_SYMBOL(ishtp_cl_get_tx_free_rings);

/**
 * ishtp_read_list_flush() - Flush read queue
 * @cl: ishtp client instance
 *
 * Used to remove all entries from the read queue for a client
 */
static void ishtp_read_list_flush(struct ishtp_cl *cl)
{
	struct ishtp_cl_rb *rb;
	struct ishtp_cl_rb *next;
	unsigned long flags;

	spin_lock_irqsave(&cl->dev->read_list_spinlock, flags);
	list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list)
		if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {
			list_del(&rb->list);
			ishtp_io_rb_free(rb);
		}
	spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags);
}

/**
 * ishtp_cl_flush_queues() - Flush all queues for a client
 * @cl: ishtp client instance
 *
 * Used to remove all queues for a client. This is called when a client
 * device needs to be reset due to an error, S3 resume or during module
 * removal
 *
 * Return: 0 on success else -EINVAL if device is NULL
 */
int ishtp_cl_flush_queues(struct ishtp_cl *cl)
{
	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	ishtp_read_list_flush(cl);

	return 0;
}
EXPORT_SYMBOL(ishtp_cl_flush_queues);

/**
 * ishtp_cl_init() - Initialize all fields of a client device
 * @cl: ishtp client instance
 * @dev: ishtp device
 *
 * Initializes client device fields: init spinlocks, init queues etc.
 * This function is called during new client creation
 */
static void ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev)
{
	memset(cl, 0, sizeof(struct ishtp_cl));
	init_waitqueue_head(&cl->wait_ctrl_res);
	spin_lock_init(&cl->free_list_spinlock);
	spin_lock_init(&cl->in_process_spinlock);
	spin_lock_init(&cl->tx_list_spinlock);
	spin_lock_init(&cl->tx_free_list_spinlock);
	spin_lock_init(&cl->fc_spinlock);
	INIT_LIST_HEAD(&cl->link);
	cl->dev = dev;

	INIT_LIST_HEAD(&cl->free_rb_list.list);
	INIT_LIST_HEAD(&cl->tx_list.list);
	INIT_LIST_HEAD(&cl->tx_free_list.list);
	INIT_LIST_HEAD(&cl->in_process_list.list);

	cl->rx_ring_size = CL_DEF_RX_RING_SIZE;
	cl->tx_ring_size = CL_DEF_TX_RING_SIZE;
	cl->tx_ring_free_size = cl->tx_ring_size;

	/* dma */
	cl->last_tx_path = CL_TX_PATH_IPC;
	cl->last_dma_acked = 1;
	cl->last_dma_addr = NULL;
	cl->last_ipc_acked = 1;
}

/**
 * ishtp_cl_allocate() - allocate client structure and set it up
 * @cl_device: ishtp client device
 *
 * Allocate memory for a new client device and initialize each field.
 *
 * Return: The allocated client instance or NULL on failure
 */
struct ishtp_cl *ishtp_cl_allocate(struct ishtp_cl_device *cl_device)
{
	struct ishtp_cl *cl;

	cl = kmalloc(sizeof(struct ishtp_cl), GFP_KERNEL);
	if (!cl)
		return NULL;

	ishtp_cl_init(cl, cl_device->ishtp_dev);
	return cl;
}
EXPORT_SYMBOL(ishtp_cl_allocate);

/**
 * ishtp_cl_free() - Frees a client device
 * @cl: client device instance
 *
 * Frees a client device
 */
void ishtp_cl_free(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	unsigned long flags;

	if (!cl)
		return;

	dev = cl->dev;
	if (!dev)
		return;

	spin_lock_irqsave(&dev->cl_list_lock, flags);
	ishtp_cl_free_rx_ring(cl);
	ishtp_cl_free_tx_ring(cl);
	kfree(cl);
	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
EXPORT_SYMBOL(ishtp_cl_free);

/**
 * ishtp_cl_link() - Reserve a host id and link the client instance
 * @cl: client device instance
 *
 * This allocates a single bit in the hostmap. This function makes sure
 * that the number of simultaneously opened client sessions does not
 * exceed the allowed maximum. Once allocated, the client device instance
 * is added to the ishtp device's current client list
 *
 * Return: 0 or error code on failure
 */
int ishtp_cl_link(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	unsigned long flags, flags_cl;
	int id, ret = 0;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	spin_lock_irqsave(&dev->device_lock, flags);

	if (dev->open_handle_count >= ISHTP_MAX_OPEN_HANDLE_COUNT) {
		ret = -EMFILE;
		goto unlock_dev;
	}

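	/* Find the first unused host client id */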
	id = find_first_zero_bit(dev->host_clients_map, ISHTP_CLIENTS_MAX);

	if (id >= ISHTP_CLIENTS_MAX) {
		spin_unlock_irqrestore(&dev->device_lock, flags);
		dev_err(&cl->device->dev, "id exceeded %d", ISHTP_CLIENTS_MAX);
		return -ENOENT;
	}

	dev->open_handle_count++;
	cl->host_client_id = id;
	spin_lock_irqsave(&dev->cl_list_lock, flags_cl);
	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		ret = -ENODEV;
		goto unlock_cl;
	}
	list_add_tail(&cl->link, &dev->cl_list);
	set_bit(id, dev->host_clients_map);
	cl->state = ISHTP_CL_INITIALIZING;

unlock_cl:
	spin_unlock_irqrestore(&dev->cl_list_lock, flags_cl);
unlock_dev:
	spin_unlock_irqrestore(&dev->device_lock, flags);
	return ret;
}
EXPORT_SYMBOL(ishtp_cl_link);

/**
 * ishtp_cl_unlink() - remove fw_cl from the client device list
 * @cl: client device instance
 *
 * Remove a previously linked client from its ishtp device
 */
void ishtp_cl_unlink(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	struct ishtp_cl *pos;
	unsigned long flags;

	/* don't shout on error exit path */
	if (!cl || !cl->dev)
		return;

	dev = cl->dev;

	spin_lock_irqsave(&dev->device_lock, flags);
	if (dev->open_handle_count > 0) {
		clear_bit(cl->host_client_id, dev->host_clients_map);
		dev->open_handle_count--;
	}
	spin_unlock_irqrestore(&dev->device_lock, flags);

	/*
	 * This checks that 'cl' is actually linked into device's structure,
	 * before attempting 'list_del'
	 */
	spin_lock_irqsave(&dev->cl_list_lock, flags);
	list_for_each_entry(pos, &dev->cl_list, link)
		if (cl->host_client_id == pos->host_client_id) {
			list_del_init(&pos->link);
			break;
		}
	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
EXPORT_SYMBOL(ishtp_cl_unlink);

/**
 * ishtp_cl_disconnect() - Send disconnect request to firmware
 * @cl: client device instance
 *
 * Send a disconnect request for a client to firmware.
 *
 * Return: 0 if successful disconnect response from the firmware or error
 * code on failure
 */
int ishtp_cl_disconnect(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	int err;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev->print_log(dev, "%s() state %d\n", __func__, cl->state);

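	/*
	 * Callers are expected to set ISHTP_CL_DISCONNECTING before calling;
	 * in any other state there is nothing to do here.
	 */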
	if (cl->state != ISHTP_CL_DISCONNECTING) {
		dev->print_log(dev, "%s() Disconnect in progress\n", __func__);
		return 0;
	}

	if (ishtp_hbm_cl_disconnect_req(dev, cl)) {
		dev->print_log(dev, "%s() Failed to disconnect\n", __func__);
		dev_err(&cl->device->dev, "failed to disconnect.\n");
		return -ENODEV;
	}

	err = wait_event_interruptible_timeout(cl->wait_ctrl_res,
			(dev->dev_state != ISHTP_DEV_ENABLED ||
			cl->state == ISHTP_CL_DISCONNECTED),
			ishtp_secs_to_jiffies(ISHTP_CL_CONNECT_TIMEOUT));

	/*
	 * If FW reset arrived, this will happen. Don't check cl->,
	 * as 'cl' may be freed already
	 */
	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
			       __func__);
		return -ENODEV;
	}

	if (cl->state == ISHTP_CL_DISCONNECTED) {
		dev->print_log(dev, "%s() successful\n", __func__);
		return 0;
	}

	return -ENODEV;
}
EXPORT_SYMBOL(ishtp_cl_disconnect);

/**
 * ishtp_cl_is_other_connecting() - Check if another client is connecting
 * @cl: client device instance
 *
 * Checks if another client with the same fw client id is connecting
 *
 * Return: true if another client is connecting, else false
 */
static bool ishtp_cl_is_other_connecting(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	struct ishtp_cl *pos;
	unsigned long flags;

	if (WARN_ON(!cl || !cl->dev))
		return false;

	dev = cl->dev;
	spin_lock_irqsave(&dev->cl_list_lock, flags);
	list_for_each_entry(pos, &dev->cl_list, link) {
		if ((pos->state == ISHTP_CL_CONNECTING) && (pos != cl) &&
				cl->fw_client_id == pos->fw_client_id) {
			spin_unlock_irqrestore(&dev->cl_list_lock, flags);
			return true;
		}
	}
	spin_unlock_irqrestore(&dev->cl_list_lock, flags);

	return false;
}

/**
 * ishtp_cl_connect() - Send connect request to firmware
 * @cl: client device instance
 *
 * Send a connect request for a client to firmware. If successful, it will
 * allocate RX and TX ring buffers
 *
 * Return: 0 if successful connect response from the firmware and able
 * to bind and allocate ring buffers or error code on failure
 */
int ishtp_cl_connect(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev->print_log(dev, "%s() current_state = %d\n", __func__, cl->state);

	if (ishtp_cl_is_other_connecting(cl)) {
		dev->print_log(dev, "%s() Busy\n", __func__);
		return -EBUSY;
	}

	if (ishtp_hbm_cl_connect_req(dev, cl)) {
		dev->print_log(dev, "%s() HBM connect req fail\n", __func__);
		return -ENODEV;
	}

	rets = wait_event_interruptible_timeout(cl->wait_ctrl_res,
				(dev->dev_state == ISHTP_DEV_ENABLED &&
				(cl->state == ISHTP_CL_CONNECTED ||
				 cl->state == ISHTP_CL_DISCONNECTED)),
				ishtp_secs_to_jiffies(
					ISHTP_CL_CONNECT_TIMEOUT));
	/*
	 * If FW reset arrived, this will happen. Don't check cl->,
	 * as 'cl' may be freed already
	 */
	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
			       __func__);
		return -EFAULT;
	}

	if (cl->state != ISHTP_CL_CONNECTED) {
		dev->print_log(dev, "%s() state != ISHTP_CL_CONNECTED\n",
			       __func__);
		return -EFAULT;
	}

	rets = cl->status;
	if (rets) {
		dev->print_log(dev, "%s() Invalid status\n", __func__);
		return rets;
	}

	rets = ishtp_cl_device_bind(cl);
	if (rets) {
		dev->print_log(dev, "%s() Bind error\n", __func__);
		ishtp_cl_disconnect(cl);
		return rets;
	}

	rets = ishtp_cl_alloc_rx_ring(cl);
	if (rets) {
		dev->print_log(dev, "%s() Alloc RX ring failed\n", __func__);
		/* if failed allocation, disconnect */
		ishtp_cl_disconnect(cl);
		return rets;
	}

	rets = ishtp_cl_alloc_tx_ring(cl);
	if (rets) {
		dev->print_log(dev, "%s() Alloc TX ring failed\n", __func__);
		/* if failed allocation, disconnect */
		ishtp_cl_free_rx_ring(cl);
		ishtp_cl_disconnect(cl);
		return rets;
	}

	/* Upon successful connection and allocation, emit flow-control */
	rets = ishtp_cl_read_start(cl);

	dev->print_log(dev, "%s() successful\n", __func__);

	return rets;
}
EXPORT_SYMBOL(ishtp_cl_connect);

/**
 * ishtp_cl_read_start() - Prepare to read client message
 * @cl: client device instance
 *
 * Get a free buffer from the pool of free read buffers and add it to the
 * device's read buffer list, then send a flow control request to firmware
 * so it is able to send the next message.
 *
 * Return: 0 if successful or error code on failure
 */
int ishtp_cl_read_start(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	struct ishtp_cl_rb *rb;
	int rets;
	int i;
	unsigned long flags;
	unsigned long dev_flags;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (cl->state != ISHTP_CL_CONNECTED)
		return -ENODEV;

	if (dev->dev_state != ISHTP_DEV_ENABLED)
		return -ENODEV;

	i = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
	if (i < 0) {
		dev_err(&cl->device->dev, "no such fw client %d\n",
			cl->fw_client_id);
		return -ENODEV;
	}

	/* The current rb is the head of the free rb list */
	spin_lock_irqsave(&cl->free_list_spinlock, flags);
	if (list_empty(&cl->free_rb_list.list)) {
		dev_warn(&cl->device->dev,
			 "[ishtp-ish] Rx buffers pool is empty\n");
		rets = -ENOMEM;
		rb = NULL;
		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
		goto out;
	}
	rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb, list);
	list_del_init(&rb->list);
	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);

	rb->cl = cl;
	rb->buf_idx = 0;

	INIT_LIST_HEAD(&rb->list);
	rets = 0;

	/*
	 * This must be BEFORE sending flow control -
	 * response in ISR may come too fast...
	 */
	spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
	list_add_tail(&rb->list, &dev->read_list.list);
	spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
	if (ishtp_hbm_cl_flow_control_req(dev, cl)) {
		rets = -ENODEV;
		goto out;
	}
out:
	/* if ishtp_hbm_cl_flow_control_req failed, return rb to free list */
	if (rets && rb) {
		spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
		list_del(&rb->list);
		spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);

		spin_lock_irqsave(&cl->free_list_spinlock, flags);
		list_add_tail(&rb->list, &cl->free_rb_list.list);
		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
	}
	return rets;
}

/**
 * ishtp_cl_send() - Send a message to firmware
 * @cl: client device instance
 * @buf: message buffer
 * @length: length of message
 *
 * If the client is in the correct state to send a message, this function
 * gets a buffer from the tx ring, copies the message data into it and
 * queues the message for sending via ishtp_cl_send_msg()
 *
 * Return: 0 if successful or error code on failure
 */
int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length)
{
	struct ishtp_device *dev;
	int id;
	struct ishtp_cl_tx_ring *cl_msg;
	int have_msg_to_send = 0;
	unsigned long tx_flags, tx_free_flags;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (cl->state != ISHTP_CL_CONNECTED) {
		++cl->err_send_msg;
		return -EPIPE;
	}

	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		++cl->err_send_msg;
		return -ENODEV;
	}

	/* Check if we have fw client device */
	id = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
	if (id < 0) {
		++cl->err_send_msg;
		return -ENOENT;
	}

	if (length > dev->fw_clients[id].props.max_msg_length) {
		++cl->err_send_msg;
		return -EMSGSIZE;
	}

	/* No free bufs */
	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
	if (list_empty(&cl->tx_free_list.list)) {
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
				       tx_free_flags);
		++cl->err_send_msg;
		return -ENOMEM;
	}

	cl_msg = list_first_entry(&cl->tx_free_list.list,
				  struct ishtp_cl_tx_ring, list);
	if (!cl_msg->send_buf.data) {
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
				       tx_free_flags);
		/* Should not happen, as free list is pre-allocated */
		return -EIO;
	}
	/*
	 * This is safe, as 'length' is already checked for not exceeding
	 * max ISHTP message size per client
	 */
	list_del_init(&cl_msg->list);
	--cl->tx_ring_free_size;

	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
	memcpy(cl_msg->send_buf.data, buf, length);
	cl_msg->send_buf.size = length;
	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
	have_msg_to_send = !list_empty(&cl->tx_list.list);
	list_add_tail(&cl_msg->list, &cl->tx_list.list);
	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

	if (!have_msg_to_send && cl->ishtp_flow_ctrl_creds > 0)
		ishtp_cl_send_msg(dev, cl);

	return 0;
}
EXPORT_SYMBOL(ishtp_cl_send);

/**
 * ishtp_cl_read_complete() - read complete
 * @rb: Pointer to client request block
 *
 * If the message is completely received call ishtp_cl_bus_rx_event()
 * to process message
 */
static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb)
{
	unsigned long flags;
	int schedule_work_flag = 0;
	struct ishtp_cl *cl = rb->cl;

	spin_lock_irqsave(&cl->in_process_spinlock, flags);
	/*
	 * if in-process list is empty, then need to schedule
	 * the processing thread
	 */
	schedule_work_flag = list_empty(&cl->in_process_list.list);
	list_add_tail(&rb->list, &cl->in_process_list.list);
	spin_unlock_irqrestore(&cl->in_process_spinlock, flags);

	if (schedule_work_flag)
		ishtp_cl_bus_rx_event(cl->device);
}

/**
 * ipc_tx_callback() - IPC tx callback function
 * @prm: Pointer to client device instance
 *
 * Send message over IPC, either for the first time or on completion
 * callback of the previous message
 */
static void ipc_tx_callback(void *prm)
{
	struct ishtp_cl *cl = prm;
	struct ishtp_cl_tx_ring *cl_msg;
	size_t rem;
	struct ishtp_device *dev = (cl ? cl->dev : NULL);
	struct ishtp_msg_hdr ishtp_hdr;
	unsigned long tx_flags, tx_free_flags;
	unsigned char *pmsg;

	if (!dev)
		return;

	/*
	 * Other conditions if some critical error has
	 * occurred before this callback is called
	 */
	if (dev->dev_state != ISHTP_DEV_ENABLED)
		return;

	if (cl->state != ISHTP_CL_CONNECTED)
		return;

	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
	if (list_empty(&cl->tx_list.list)) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		return;
	}

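	/*
	 * A new message may start only when a flow-control credit is
	 * available; continuation fragments (cl->sending set) proceed
	 * without consuming another credit.
	 */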
	if (cl->ishtp_flow_ctrl_creds != 1 && !cl->sending) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		return;
	}

	if (!cl->sending) {
		--cl->ishtp_flow_ctrl_creds;
		cl->last_ipc_acked = 0;
		cl->last_tx_path = CL_TX_PATH_IPC;
		cl->sending = 1;
	}

	cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
			    list);
	rem = cl_msg->send_buf.size - cl->tx_offs;

	ishtp_hdr.host_addr = cl->host_client_id;
	ishtp_hdr.fw_addr = cl->fw_client_id;
	ishtp_hdr.reserved = 0;
	pmsg = cl_msg->send_buf.data + cl->tx_offs;

	if (rem <= dev->mtu) {
		ishtp_hdr.length = rem;
		ishtp_hdr.msg_complete = 1;
		cl->sending = 0;
		list_del_init(&cl_msg->list);	/* Must be before write */
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		/* Submit to IPC queue with no callback */
		ishtp_write_message(dev, &ishtp_hdr, pmsg);
		spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
		list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
		++cl->tx_ring_free_size;
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
				       tx_free_flags);
	} else {
		/* Send IPC fragment */
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		cl->tx_offs += dev->mtu;
		ishtp_hdr.length = dev->mtu;
		ishtp_hdr.msg_complete = 0;
		ishtp_send_msg(dev, &ishtp_hdr, pmsg, ipc_tx_callback, cl);
	}
}

/**
 * ishtp_cl_send_msg_ipc() - Send message using IPC
 * @dev: ISHTP device instance
 * @cl: Pointer to client device instance
 *
 * Send message over IPC, not using DMA
 */
static void ishtp_cl_send_msg_ipc(struct ishtp_device *dev,
				  struct ishtp_cl *cl)
{
	/* If last DMA message wasn't acked yet, leave this one in Tx queue */
	if (cl->last_tx_path == CL_TX_PATH_DMA && cl->last_dma_acked == 0)
		return;

	cl->tx_offs = 0;
	ipc_tx_callback(cl);
	++cl->send_msg_cnt_ipc;
}

/**
 * ishtp_cl_send_msg_dma() - Send message using DMA
 * @dev: ISHTP device instance
 * @cl: Pointer to client device instance
 *
 * Send message using DMA
 */
static void ishtp_cl_send_msg_dma(struct ishtp_device *dev,
				  struct ishtp_cl *cl)
{
	struct ishtp_msg_hdr hdr;
	struct dma_xfer_hbm dma_xfer;
	unsigned char *msg_addr;
	int off;
	struct ishtp_cl_tx_ring *cl_msg;
	unsigned long tx_flags, tx_free_flags;

	/* If last IPC message wasn't acked yet, leave this one in Tx queue */
	if (cl->last_tx_path == CL_TX_PATH_IPC && cl->last_ipc_acked == 0)
		return;

	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
	if (list_empty(&cl->tx_list.list)) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		return;
	}

	cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
			    list);

	msg_addr = ishtp_cl_get_dma_send_buf(dev, cl_msg->send_buf.size);
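	/* No DMA slot available: fall back to IPC if the default path allows */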
	if (!msg_addr) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		if (dev->transfer_path == CL_TX_PATH_DEFAULT)
			ishtp_cl_send_msg_ipc(dev, cl);
		return;
	}

	list_del_init(&cl_msg->list);	/* Must be before write */
	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

	--cl->ishtp_flow_ctrl_creds;
	cl->last_dma_acked = 0;
	cl->last_dma_addr = msg_addr;
	cl->last_tx_path = CL_TX_PATH_DMA;

	/* write msg to dma buf */
	memcpy(msg_addr, cl_msg->send_buf.data, cl_msg->send_buf.size);

	/* send dma_xfer hbm msg */
	off = msg_addr - (unsigned char *)dev->ishtp_host_dma_tx_buf;
	ishtp_hbm_hdr(&hdr, sizeof(struct dma_xfer_hbm));
	dma_xfer.hbm = DMA_XFER;
	dma_xfer.fw_client_id = cl->fw_client_id;
	dma_xfer.host_client_id = cl->host_client_id;
	dma_xfer.reserved = 0;
	dma_xfer.msg_addr = dev->ishtp_host_dma_tx_buf_phys + off;
	dma_xfer.msg_length = cl_msg->send_buf.size;
	dma_xfer.reserved2 = 0;
	ishtp_write_message(dev, &hdr, (unsigned char *)&dma_xfer);
	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
	list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
	++cl->tx_ring_free_size;
	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
	++cl->send_msg_cnt_dma;
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) * ishtp_cl_send_msg() -Send message using DMA or IPC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) * @dev: ISHTP device instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) * @cl: Pointer to client device instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * Send message using DMA or IPC based on transfer_path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) */
void ishtp_cl_send_msg(struct ishtp_device *dev, struct ishtp_cl *cl)
{
	if (dev->transfer_path == CL_TX_PATH_DMA)
		ishtp_cl_send_msg_dma(dev, cl);
	else
		ishtp_cl_send_msg_ipc(dev, cl);
}
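
/*
 * Illustrative sketch (an assumed call site, not code in this file): a
 * write path typically fills a TX ring buffer, queues it on cl->tx_list
 * under cl->tx_list_spinlock, and then lets ishtp_cl_send_msg() pick the
 * IPC or DMA path:
 *
 *	memcpy(cl_msg->send_buf.data, buf, length);
 *	cl_msg->send_buf.size = length;
 *	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
 *	list_add_tail(&cl_msg->list, &cl->tx_list.list);
 *	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
 *	ishtp_cl_send_msg(dev, cl);
 */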

/**
 * recv_ishtp_cl_msg() - Receive client message
 * @dev: ISHTP device instance
 * @ishtp_hdr: Pointer to message header
 *
 * Receive and dispatch ISHTP client messages. This function executes in
 * ISR or work queue context.
 */
void recv_ishtp_cl_msg(struct ishtp_device *dev,
		       struct ishtp_msg_hdr *ishtp_hdr)
{
	struct ishtp_cl *cl;
	struct ishtp_cl_rb *rb;
	struct ishtp_cl_rb *new_rb;
	unsigned char *buffer = NULL;
	struct ishtp_cl_rb *complete_rb = NULL;
	unsigned long flags;
	int rb_count;

	if (ishtp_hdr->reserved) {
		dev_err(dev->devc, "corrupted message header.\n");
		goto eoi;
	}

	if (ishtp_hdr->length > IPC_PAYLOAD_SIZE) {
		dev_err(dev->devc,
			"ISHTP message length in hdr exceeds IPC MTU\n");
		goto eoi;
	}

	spin_lock_irqsave(&dev->read_list_spinlock, flags);
	rb_count = -1;
	list_for_each_entry(rb, &dev->read_list.list, list) {
		++rb_count;
		cl = rb->cl;
		if (!cl || !(cl->host_client_id == ishtp_hdr->host_addr &&
				cl->fw_client_id == ishtp_hdr->fw_addr) ||
				!(cl->state == ISHTP_CL_CONNECTED))
			continue;

		/* If no Rx buffer is allocated, disband the rb */
		if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
			dev_err(&cl->device->dev,
				"Rx buffer is not allocated.\n");
			list_del(&rb->list);
			ishtp_io_rb_free(rb);
			cl->status = -ENOMEM;
			goto eoi;
		}
		/*
		 * If the message overflows the buffer (exceeds max. client
		 * msg size), drop it and recycle the buffer. Do we need to
		 * disconnect such a client? (We don't send back FC, so
		 * communication will be stuck anyway.)
		 */
		if (rb->buffer.size < ishtp_hdr->length + rb->buf_idx) {
			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
			dev_err(&cl->device->dev,
				"message overflow. size %d len %d idx %ld\n",
				rb->buffer.size, ishtp_hdr->length,
				rb->buf_idx);
			list_del(&rb->list);
			ishtp_cl_io_rb_recycle(rb);
			cl->status = -EIO;
			goto eoi;
		}

		buffer = rb->buffer.data + rb->buf_idx;
		dev->ops->ishtp_read(dev, buffer, ishtp_hdr->length);

		rb->buf_idx += ishtp_hdr->length;
		if (ishtp_hdr->msg_complete) {
			/* Last fragment in message - it's complete */
			cl->status = 0;
			list_del(&rb->list);
			complete_rb = rb;

			--cl->out_flow_ctrl_creds;
			/*
			 * The whole msg arrived; send a new FC and add a new
			 * rb buffer for the next incoming msg.
			 */
			spin_lock(&cl->free_list_spinlock);

			if (!list_empty(&cl->free_rb_list.list)) {
				new_rb = list_entry(cl->free_rb_list.list.next,
					struct ishtp_cl_rb, list);
				list_del_init(&new_rb->list);
				spin_unlock(&cl->free_list_spinlock);
				new_rb->cl = cl;
				new_rb->buf_idx = 0;
				INIT_LIST_HEAD(&new_rb->list);
				list_add_tail(&new_rb->list,
					&dev->read_list.list);

				ishtp_hbm_cl_flow_control_req(dev, cl);
			} else {
				spin_unlock(&cl->free_list_spinlock);
			}
		}
		/* One more fragment in message (even if this was last) */
		++cl->recv_msg_num_frags;

		/*
		 * We can safely break here (and in BH too),
		 * a single input message can go only to a single request!
		 */
		break;
	}

	spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
	/* If it's nobody's message, just read and discard it */
	if (!buffer) {
		uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];

		dev_err(dev->devc, "Dropped Rx msg - no request\n");
		dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
		goto eoi;
	}

	if (complete_rb) {
		cl = complete_rb->cl;
		cl->ts_rx = ktime_get();
		++cl->recv_msg_cnt_ipc;
		ishtp_cl_read_complete(complete_rb);
	}
eoi:
	return;
}
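
/*
 * Illustrative sketch (an assumed consumer, not code in this file): after
 * ishtp_cl_read_complete() hands over a finished rb, a reader would
 * consume rb->buffer.data up to rb->buf_idx and then recycle the buffer,
 * which also re-arms flow control for the next message:
 *
 *	memcpy(out_buf, rb->buffer.data, rb->buf_idx);
 *	ishtp_cl_io_rb_recycle(rb);
 */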

/**
 * recv_ishtp_cl_msg_dma() - Receive client message over DMA
 * @dev: ISHTP device instance
 * @msg: pointer to the message payload received over DMA
 * @hbm: DMA_XFER HBM request describing the message
 *
 * Receive and dispatch ISHTP client messages using DMA. This function
 * executes in ISR or work queue context.
 */
void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
			   struct dma_xfer_hbm *hbm)
{
	struct ishtp_cl *cl;
	struct ishtp_cl_rb *rb;
	struct ishtp_cl_rb *new_rb;
	unsigned char *buffer = NULL;
	struct ishtp_cl_rb *complete_rb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->read_list_spinlock, flags);

	list_for_each_entry(rb, &dev->read_list.list, list) {
		cl = rb->cl;
		if (!cl || !(cl->host_client_id == hbm->host_client_id &&
				cl->fw_client_id == hbm->fw_client_id) ||
				!(cl->state == ISHTP_CL_CONNECTED))
			continue;

		/*
		 * If no Rx buffer is allocated, disband the rb
		 */
		if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
			dev_err(&cl->device->dev,
				"response buffer is not allocated.\n");
			list_del(&rb->list);
			ishtp_io_rb_free(rb);
			cl->status = -ENOMEM;
			goto eoi;
		}
		/*
		 * If the message overflows the buffer (exceeds max. client
		 * msg size), drop it and recycle the buffer. Do we need to
		 * disconnect such a client? (We don't send back FC, so
		 * communication will be stuck anyway.)
		 */
		if (rb->buffer.size < hbm->msg_length) {
			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
			dev_err(&cl->device->dev,
				"message overflow. size %d len %d idx %ld\n",
				rb->buffer.size, hbm->msg_length, rb->buf_idx);
			list_del(&rb->list);
			ishtp_cl_io_rb_recycle(rb);
			cl->status = -EIO;
			goto eoi;
		}

		buffer = rb->buffer.data;
		memcpy(buffer, msg, hbm->msg_length);
		rb->buf_idx = hbm->msg_length;

		/* Last fragment in message - it's complete */
		cl->status = 0;
		list_del(&rb->list);
		complete_rb = rb;

		--cl->out_flow_ctrl_creds;
		/*
		 * The whole msg arrived; send a new FC and add a new
		 * rb buffer for the next incoming msg.
		 */
		spin_lock(&cl->free_list_spinlock);

		if (!list_empty(&cl->free_rb_list.list)) {
			new_rb = list_entry(cl->free_rb_list.list.next,
				struct ishtp_cl_rb, list);
			list_del_init(&new_rb->list);
			spin_unlock(&cl->free_list_spinlock);
			new_rb->cl = cl;
			new_rb->buf_idx = 0;
			INIT_LIST_HEAD(&new_rb->list);
			list_add_tail(&new_rb->list,
				&dev->read_list.list);

			ishtp_hbm_cl_flow_control_req(dev, cl);
		} else {
			spin_unlock(&cl->free_list_spinlock);
		}

		/* One more fragment in message (this is always last) */
		++cl->recv_msg_num_frags;

		/*
		 * We can safely break here (and in BH too),
		 * a single input message can go only to a single request!
		 */
		break;
	}

	spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
	/* If it's nobody's message, just read and discard it */
	if (!buffer) {
		dev_err(dev->devc, "Dropped Rx (DMA) msg - no request\n");
		goto eoi;
	}

	if (complete_rb) {
		cl = complete_rb->cl;
		cl->ts_rx = ktime_get();
		++cl->recv_msg_cnt_dma;
		ishtp_cl_read_complete(complete_rb);
	}
eoi:
	return;
}

/* Return the private context pointer set by the client driver, if any */
void *ishtp_get_client_data(struct ishtp_cl *cl)
{
	return cl->client_data;
}
EXPORT_SYMBOL(ishtp_get_client_data);

/* Attach a client driver's private context to the ishtp client */
void ishtp_set_client_data(struct ishtp_cl *cl, void *data)
{
	cl->client_data = data;
}
EXPORT_SYMBOL(ishtp_set_client_data);
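
/*
 * Illustrative usage (assumption): a client driver stashes its private
 * context on the ishtp client at probe time and retrieves it later in
 * callbacks, where my_drv_data is a hypothetical client-defined type:
 *
 *	ishtp_set_client_data(cl, my_drv_data);
 *	...
 *	struct my_drv_data *p = ishtp_get_client_data(cl);
 */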

/* Return the ISHTP device this client is bound to */
struct ishtp_device *ishtp_get_ishtp_device(struct ishtp_cl *cl)
{
	return cl->dev;
}
EXPORT_SYMBOL(ishtp_get_ishtp_device);

/* Set the number of TX ring buffers to allocate for this client */
void ishtp_set_tx_ring_size(struct ishtp_cl *cl, int size)
{
	cl->tx_ring_size = size;
}
EXPORT_SYMBOL(ishtp_set_tx_ring_size);

/* Set the number of RX ring buffers to allocate for this client */
void ishtp_set_rx_ring_size(struct ishtp_cl *cl, int size)
{
	cl->rx_ring_size = size;
}
EXPORT_SYMBOL(ishtp_set_rx_ring_size);

/* Record the client's connection state (e.g. ISHTP_CL_CONNECTED) */
void ishtp_set_connection_state(struct ishtp_cl *cl, int state)
{
	cl->state = state;
}
EXPORT_SYMBOL(ishtp_set_connection_state);

/* Record the firmware-side client address used in message headers */
void ishtp_cl_set_fw_client_id(struct ishtp_cl *cl, int fw_client_id)
{
	cl->fw_client_id = fw_client_id;
}
EXPORT_SYMBOL(ishtp_cl_set_fw_client_id);
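
/*
 * Illustrative setup sketch (assumption): a client driver would size the
 * rings and record addressing/state before connecting, typically at probe
 * time. The ring-size constants here are hypothetical placeholders:
 *
 *	ishtp_set_tx_ring_size(cl, MY_CL_TX_RING_SIZE);
 *	ishtp_set_rx_ring_size(cl, MY_CL_RX_RING_SIZE);
 *	ishtp_cl_set_fw_client_id(cl, fw_client_id);
 *	ishtp_set_connection_state(cl, ISHTP_CL_CONNECTING);
 */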