// SPDX-License-Identifier: GPL-2.0-only
/*
 * ISHTP DMA I/F functions
 *
 * Copyright (c) 2003-2016, Intel Corporation.
 */

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "ishtp-dev.h"
#include "client.h"

/**
 * ishtp_cl_alloc_dma_buf() - Allocate DMA RX and TX buffer
 * @dev: ishtp device
 *
 * Allocate the RX and TX DMA buffers once during bus setup.
 * 1 MB is allocated for each buffer, and each buffer is divided
 * into slots.
 */
void ishtp_cl_alloc_dma_buf(struct ishtp_device *dev)
{
	dma_addr_t h;

	if (dev->ishtp_host_dma_tx_buf)
		return;

	dev->ishtp_host_dma_tx_buf_size = 1024*1024;
	dev->ishtp_host_dma_rx_buf_size = 1024*1024;

	/* Allocate Tx buffer and init usage bitmap */
	dev->ishtp_host_dma_tx_buf = dma_alloc_coherent(dev->devc,
					dev->ishtp_host_dma_tx_buf_size,
					&h, GFP_KERNEL);
	if (dev->ishtp_host_dma_tx_buf)
		dev->ishtp_host_dma_tx_buf_phys = h;

	dev->ishtp_dma_num_slots = dev->ishtp_host_dma_tx_buf_size /
						DMA_SLOT_SIZE;

	dev->ishtp_dma_tx_map = kcalloc(dev->ishtp_dma_num_slots,
					sizeof(uint8_t),
					GFP_KERNEL);
	spin_lock_init(&dev->ishtp_dma_tx_lock);

	/* Allocate Rx buffer */
	dev->ishtp_host_dma_rx_buf = dma_alloc_coherent(dev->devc,
					dev->ishtp_host_dma_rx_buf_size,
					&h, GFP_KERNEL);

	if (dev->ishtp_host_dma_rx_buf)
		dev->ishtp_host_dma_rx_buf_phys = h;
}
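
/*
 * Illustrative sketch, not part of the original driver: the layout set up
 * by ishtp_cl_alloc_dma_buf() above. The 1 MB TX buffer is split into
 * ishtp_dma_num_slots slots of DMA_SLOT_SIZE bytes, and ishtp_dma_tx_map
 * keeps one byte per slot (0 = free, non-zero = in use); slot i starts at
 * byte offset i * DMA_SLOT_SIZE inside ishtp_host_dma_tx_buf. The helper
 * name below is hypothetical and exists only to show that relationship.
 */
static void __maybe_unused ishtp_dma_layout_example(struct ishtp_device *dev)
{
	int slot = 3;	/* arbitrary slot index, for illustration only */

	dev_dbg(dev->devc, "TX DMA buffer: %u bytes, %d slots of %d bytes\n",
		(unsigned int)dev->ishtp_host_dma_tx_buf_size,
		dev->ishtp_dma_num_slots, (int)DMA_SLOT_SIZE);

	/* A slot address is a byte offset into the coherent TX buffer */
	dev_dbg(dev->devc, "slot %d starts at %p\n", slot,
		(unsigned char *)dev->ishtp_host_dma_tx_buf +
			slot * DMA_SLOT_SIZE);
}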

/**
 * ishtp_cl_free_dma_buf() - Free DMA RX and TX buffer
 * @dev: ishtp device
 *
 * Free the DMA buffers when all clients are released. This only
 * happens during the error path in the ISH built-in driver model.
 */
void ishtp_cl_free_dma_buf(struct ishtp_device *dev)
{
	dma_addr_t h;

	if (dev->ishtp_host_dma_tx_buf) {
		h = dev->ishtp_host_dma_tx_buf_phys;
		dma_free_coherent(dev->devc, dev->ishtp_host_dma_tx_buf_size,
				  dev->ishtp_host_dma_tx_buf, h);
	}

	if (dev->ishtp_host_dma_rx_buf) {
		h = dev->ishtp_host_dma_rx_buf_phys;
		dma_free_coherent(dev->devc, dev->ishtp_host_dma_rx_buf_size,
				  dev->ishtp_host_dma_rx_buf, h);
	}

	kfree(dev->ishtp_dma_tx_map);
	dev->ishtp_host_dma_tx_buf = NULL;
	dev->ishtp_host_dma_rx_buf = NULL;
	dev->ishtp_dma_tx_map = NULL;
}

/*
 * ishtp_cl_get_dma_send_buf() - Get a DMA memory slot
 * @dev: ishtp device
 * @size: Size of memory to get
 *
 * Find a free region of "size" bytes in the DMA TX buffer and mark
 * it as in use.
 *
 * Return: NULL when no free buffer is available, otherwise the address
 * to copy the message into.
 */
void *ishtp_cl_get_dma_send_buf(struct ishtp_device *dev,
				uint32_t size)
{
	unsigned long flags;
	int i, j, free;
	/* an additional slot is needed if there is a remainder */
	int required_slots = (size / DMA_SLOT_SIZE)
		+ 1 * (size % DMA_SLOT_SIZE != 0);

	spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
	for (i = 0; i <= (dev->ishtp_dma_num_slots - required_slots); i++) {
		free = 1;
		for (j = 0; j < required_slots; j++)
			if (dev->ishtp_dma_tx_map[i+j]) {
				free = 0;
				i += j;
				break;
			}
		if (free) {
			/* mark memory as "caught" */
			for (j = 0; j < required_slots; j++)
				dev->ishtp_dma_tx_map[i+j] = 1;
			spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
			return (i * DMA_SLOT_SIZE) +
				(unsigned char *)dev->ishtp_host_dma_tx_buf;
		}
	}
	spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
	dev_err(dev->devc, "No free DMA buffer to send msg\n");
	return NULL;
}

/*
 * ishtp_cl_release_dma_acked_mem() - Release DMA memory slot
 * @dev: ishtp device
 * @msg_addr: message address of slot
 * @size: Size of memory to release
 *
 * Return the acked memory region (size bytes starting at msg_addr)
 * to the free list.
 */
void ishtp_cl_release_dma_acked_mem(struct ishtp_device *dev,
				    void *msg_addr,
				    uint8_t size)
{
	unsigned long flags;
	int acked_slots = (size / DMA_SLOT_SIZE)
		+ 1 * (size % DMA_SLOT_SIZE != 0);
	int i, j;

	if ((msg_addr - dev->ishtp_host_dma_tx_buf) % DMA_SLOT_SIZE) {
		dev_err(dev->devc, "Bad DMA Tx ack address\n");
		return;
	}

	i = (msg_addr - dev->ishtp_host_dma_tx_buf) / DMA_SLOT_SIZE;
	spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
	for (j = 0; j < acked_slots; j++) {
		if ((i + j) >= dev->ishtp_dma_num_slots ||
		    !dev->ishtp_dma_tx_map[i+j]) {
			/* no such slot, or memory is already free */
			spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
			dev_err(dev->devc, "Bad DMA Tx ack address\n");
			return;
		}
		dev->ishtp_dma_tx_map[i+j] = 0;
	}
	spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
}
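
/*
 * Illustrative sketch, not part of the original driver: how a sender might
 * use the slot allocator above. ishtp_cl_get_dma_send_buf() reserves enough
 * contiguous slots for the message and returns the address to copy into;
 * once the firmware acknowledges the DMA transfer, the same address and
 * size are handed back to ishtp_cl_release_dma_acked_mem(). The function
 * name is hypothetical and the actual transfer request is omitted.
 */
static void __maybe_unused ishtp_dma_tx_roundtrip_example(
		struct ishtp_device *dev, const void *payload, uint8_t size)
{
	void *dma_buf;

	/* Reserve contiguous slots covering "size" bytes */
	dma_buf = ishtp_cl_get_dma_send_buf(dev, size);
	if (!dma_buf)
		return;	/* no free slots; a real caller would retry later */

	/* Copy the message into the shared coherent TX buffer */
	memcpy(dma_buf, payload, size);

	/*
	 * ... the DMA transfer request to the firmware would go here ...
	 *
	 * When the firmware acks the transfer, the slots are returned:
	 */
	ishtp_cl_release_dma_acked_mem(dev, dma_buf, size);
}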