// SPDX-License-Identifier: GPL-2.0-only
/*
 * ISHTP Ring Buffers
 *
 * Copyright (c) 2003-2016, Intel Corporation.
 */

#include <linux/slab.h>
#include "client.h"

/**
 * ishtp_cl_alloc_rx_ring() - Allocate RX ring buffers
 * @cl: client device instance
 *
 * Allocate and initialize RX ring buffers
 *
 * Return: 0 on success else -ENOMEM
 */
int ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl)
{
	size_t len = cl->device->fw_client->props.max_msg_length;
	int j;
	struct ishtp_cl_rb *rb;
	int ret = 0;
	unsigned long flags;

	for (j = 0; j < cl->rx_ring_size; ++j) {
		rb = ishtp_io_rb_init(cl);
		if (!rb) {
			ret = -ENOMEM;
			goto out;
		}
		ret = ishtp_io_rb_alloc_buf(rb, len);
		if (ret) {
			/*
			 * rb is not on free_rb_list yet, so
			 * ishtp_cl_free_rx_ring() below would miss it;
			 * free it explicitly.
			 */
			ishtp_io_rb_free(rb);
			goto out;
		}
		spin_lock_irqsave(&cl->free_list_spinlock, flags);
		list_add_tail(&rb->list, &cl->free_rb_list.list);
		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
	}

	return 0;

out:
	dev_err(&cl->device->dev, "error in allocating Rx buffers\n");
	ishtp_cl_free_rx_ring(cl);
	return ret;
}
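
/*
 * Usage sketch (illustrative only, not part of this file): a client
 * driver typically sizes and allocates its RX ring right after binding
 * to a firmware client. The ring size below is a made-up example value.
 *
 *	cl->rx_ring_size = 32;
 *	rv = ishtp_cl_alloc_rx_ring(cl);
 *	if (rv)
 *		return rv;
 */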

/**
 * ishtp_cl_alloc_tx_ring() - Allocate TX ring buffers
 * @cl: client device instance
 *
 * Allocate and initialize TX ring buffers
 *
 * Return: 0 on success else -ENOMEM
 */
int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)
{
	size_t len = cl->device->fw_client->props.max_msg_length;
	int j;
	unsigned long flags;

	cl->tx_ring_free_size = 0;

	/* Allocate pool to free Tx bufs */
	for (j = 0; j < cl->tx_ring_size; ++j) {
		struct ishtp_cl_tx_ring *tx_buf;

		tx_buf = kzalloc(sizeof(struct ishtp_cl_tx_ring), GFP_KERNEL);
		if (!tx_buf)
			goto out;

		tx_buf->send_buf.data = kmalloc(len, GFP_KERNEL);
		if (!tx_buf->send_buf.data) {
			kfree(tx_buf);
			goto out;
		}

		spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
		list_add_tail(&tx_buf->list, &cl->tx_free_list.list);
		++cl->tx_ring_free_size;
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);
	}
	return 0;
out:
	dev_err(&cl->device->dev, "error in allocating Tx pool\n");
	ishtp_cl_free_tx_ring(cl);
	return -ENOMEM;
}
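
/*
 * Usage sketch (illustrative only): TX ring allocation follows the same
 * pattern; a caller that already allocated the RX ring would normally
 * unwind it when TX allocation fails. The ring size is a made-up value.
 *
 *	cl->tx_ring_size = 16;
 *	rv = ishtp_cl_alloc_tx_ring(cl);
 *	if (rv) {
 *		ishtp_cl_free_rx_ring(cl);
 *		return rv;
 *	}
 */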

/**
 * ishtp_cl_free_rx_ring() - Free RX ring buffers
 * @cl: client device instance
 *
 * Free RX ring buffers
 */
void ishtp_cl_free_rx_ring(struct ishtp_cl *cl)
{
	struct ishtp_cl_rb *rb;
	unsigned long flags;

	/* release allocated memory - pass over free_rb_list */
	spin_lock_irqsave(&cl->free_list_spinlock, flags);
	while (!list_empty(&cl->free_rb_list.list)) {
		rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb,
				list);
		list_del(&rb->list);
		kfree(rb->buffer.data);
		kfree(rb);
	}
	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
	/* release allocated memory - pass over in_process_list */
	spin_lock_irqsave(&cl->in_process_spinlock, flags);
	while (!list_empty(&cl->in_process_list.list)) {
		rb = list_entry(cl->in_process_list.list.next,
				struct ishtp_cl_rb, list);
		list_del(&rb->list);
		kfree(rb->buffer.data);
		kfree(rb);
	}
	spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
}

/**
 * ishtp_cl_free_tx_ring() - Free TX ring buffers
 * @cl: client device instance
 *
 * Free TX ring buffers
 */
void ishtp_cl_free_tx_ring(struct ishtp_cl *cl)
{
	struct ishtp_cl_tx_ring *tx_buf;
	unsigned long flags;

	spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
	/* release allocated memory - pass over tx_free_list */
	while (!list_empty(&cl->tx_free_list.list)) {
		tx_buf = list_entry(cl->tx_free_list.list.next,
				    struct ishtp_cl_tx_ring, list);
		list_del(&tx_buf->list);
		--cl->tx_ring_free_size;
		kfree(tx_buf->send_buf.data);
		kfree(tx_buf);
	}
	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);

	spin_lock_irqsave(&cl->tx_list_spinlock, flags);
	/* release allocated memory - pass over tx_list */
	while (!list_empty(&cl->tx_list.list)) {
		tx_buf = list_entry(cl->tx_list.list.next,
				    struct ishtp_cl_tx_ring, list);
		list_del(&tx_buf->list);
		kfree(tx_buf->send_buf.data);
		kfree(tx_buf);
	}
	spin_unlock_irqrestore(&cl->tx_list_spinlock, flags);
}
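
/*
 * Teardown sketch (illustrative only): both free routines simply drain
 * their lists, so a disconnect or error path can call them back to back
 * regardless of how far allocation got:
 *
 *	ishtp_cl_free_rx_ring(cl);
 *	ishtp_cl_free_tx_ring(cl);
 */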

/**
 * ishtp_io_rb_free() - Free IO request block
 * @rb: IO request block
 *
 * Free IO request block memory
 */
void ishtp_io_rb_free(struct ishtp_cl_rb *rb)
{
	if (!rb)
		return;

	kfree(rb->buffer.data);
	kfree(rb);
}

/**
 * ishtp_io_rb_init() - Allocate and init IO request block
 * @cl: client device instance
 *
 * Allocate and initialize request block
 *
 * Return: Allocated IO request block pointer, or NULL on failure
 */
struct ishtp_cl_rb *ishtp_io_rb_init(struct ishtp_cl *cl)
{
	struct ishtp_cl_rb *rb;

	rb = kzalloc(sizeof(struct ishtp_cl_rb), GFP_KERNEL);
	if (!rb)
		return NULL;

	INIT_LIST_HEAD(&rb->list);
	rb->cl = cl;
	rb->buf_idx = 0;
	return rb;
}

/**
 * ishtp_io_rb_alloc_buf() - Allocate and init response buffer
 * @rb: IO request block
 * @length: length of response buffer
 *
 * Allocate response buffer
 *
 * Return: 0 on success else -ENOMEM
 */
int ishtp_io_rb_alloc_buf(struct ishtp_cl_rb *rb, size_t length)
{
	if (!rb)
		return -EINVAL;

	if (length == 0)
		return 0;

	rb->buffer.data = kmalloc(length, GFP_KERNEL);
	if (!rb->buffer.data)
		return -ENOMEM;

	rb->buffer.size = length;
	return 0;
}
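
/*
 * Usage sketch (illustrative only): ishtp_io_rb_init() and
 * ishtp_io_rb_alloc_buf() are used as a pair, and the rb must still be
 * released when only the buffer allocation fails:
 *
 *	rb = ishtp_io_rb_init(cl);
 *	if (!rb)
 *		return -ENOMEM;
 *	ret = ishtp_io_rb_alloc_buf(rb, len);
 *	if (ret) {
 *		ishtp_io_rb_free(rb);
 *		return ret;
 *	}
 */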

/**
 * ishtp_cl_io_rb_recycle() - Recycle IO request blocks
 * @rb: IO request block
 *
 * Re-append rb to its client's free list and send flow control if needed
 *
 * Return: 0 on success else -EFAULT
 */
int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb)
{
	struct ishtp_cl *cl;
	int rets = 0;
	unsigned long flags;

	if (!rb || !rb->cl)
		return -EFAULT;

	cl = rb->cl;
	spin_lock_irqsave(&cl->free_list_spinlock, flags);
	list_add_tail(&rb->list, &cl->free_rb_list.list);
	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);

	/*
	 * If we returned the first buffer to an empty 'free' list,
	 * send flow control
	 */
	if (!cl->out_flow_ctrl_creds)
		rets = ishtp_cl_read_start(cl);

	return rets;
}
EXPORT_SYMBOL(ishtp_cl_io_rb_recycle);
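
/*
 * Usage sketch (illustrative only): a client's RX event handler
 * consumes a received buffer and then recycles it, which may restart
 * reading and grant the firmware another flow-control credit.
 * process_data() is a hypothetical consumer, not an ISHTP API:
 *
 *	rb = ishtp_cl_rx_get_rb(cl);
 *	if (rb) {
 *		process_data(rb->buffer.data, rb->buf_idx);
 *		ishtp_cl_io_rb_recycle(rb);
 *	}
 */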

/**
 * ishtp_cl_tx_empty() - test whether client device tx buffer is empty
 * @cl: Pointer to client device instance
 *
 * Look at the client device tx buffer list and check whether it is empty
 *
 * Return: true if client tx buffer list is empty else false
 */
bool ishtp_cl_tx_empty(struct ishtp_cl *cl)
{
	int tx_list_empty;
	unsigned long tx_flags;

	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
	tx_list_empty = list_empty(&cl->tx_list.list);
	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

	return !!tx_list_empty;
}
EXPORT_SYMBOL(ishtp_cl_tx_empty);
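
/*
 * Usage sketch (illustrative only): a driver that must drain pending TX
 * before, say, suspending could poll the list state:
 *
 *	while (!ishtp_cl_tx_empty(cl))
 *		usleep_range(100, 200);
 */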

/**
 * ishtp_cl_rx_get_rb() - Get an rb from client device rx buffer list
 * @cl: Pointer to client device instance
 *
 * Check the client device in-process buffer list and get an rb from it.
 *
 * Return: rb pointer if buffer list isn't empty else NULL
 */
struct ishtp_cl_rb *ishtp_cl_rx_get_rb(struct ishtp_cl *cl)
{
	unsigned long rx_flags;
	struct ishtp_cl_rb *rb;

	spin_lock_irqsave(&cl->in_process_spinlock, rx_flags);
	rb = list_first_entry_or_null(&cl->in_process_list.list,
				      struct ishtp_cl_rb, list);
	if (rb)
		list_del_init(&rb->list);
	spin_unlock_irqrestore(&cl->in_process_spinlock, rx_flags);

	return rb;
}
EXPORT_SYMBOL(ishtp_cl_rx_get_rb);
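
/*
 * Usage sketch (illustrative only): since the function returns NULL once
 * the in-process list is empty, a handler can drain everything queued so
 * far. consume() is a hypothetical helper, not an ISHTP API:
 *
 *	while ((rb = ishtp_cl_rx_get_rb(cl)) != NULL) {
 *		consume(rb);
 *		ishtp_cl_io_rb_recycle(rb);
 *	}
 */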