^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) // Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) // Copyright (c) 2018, Linaro Limited
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #include <linux/completion.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/dma-buf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/idr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/miscdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/of_address.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/sort.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/of_platform.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/rpmsg.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/scatterlist.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <uapi/misc/fastrpc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #define ADSP_DOMAIN_ID (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #define MDSP_DOMAIN_ID (1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #define SDSP_DOMAIN_ID (2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #define CDSP_DOMAIN_ID (3)
#define FASTRPC_DEV_MAX 4 /* adsp, mdsp, sdsp, cdsp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #define FASTRPC_MAX_SESSIONS 9 /*8 compute, 1 cpz*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #define FASTRPC_ALIGN 128
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #define FASTRPC_MAX_FDLIST 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #define FASTRPC_MAX_CRCLIST 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #define FASTRPC_PHYS(p) ((p) & 0xffffffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #define FASTRPC_CTX_MAX (256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #define FASTRPC_INIT_HANDLE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #define FASTRPC_CTXID_MASK (0xFF0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #define INIT_FILELEN_MAX (2 * 1024 * 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #define FASTRPC_DEVICE_NAME "fastrpc"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #define ADSP_MMAP_ADD_PAGES 0x1000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38)
/* Retrieves number of input buffers from the scalars parameter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #define REMOTE_SCALARS_INBUFS(sc) (((sc) >> 16) & 0x0ff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
/* Retrieves number of output buffers from the scalars parameter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #define REMOTE_SCALARS_OUTBUFS(sc) (((sc) >> 8) & 0x0ff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44)
/* Retrieves number of input handles from the scalars parameter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #define REMOTE_SCALARS_INHANDLES(sc) (((sc) >> 4) & 0x0f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
/* Retrieves number of output handles from the scalars parameter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #define REMOTE_SCALARS_OUTHANDLES(sc) ((sc) & 0x0f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #define REMOTE_SCALARS_LENGTH(sc) (REMOTE_SCALARS_INBUFS(sc) + \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) REMOTE_SCALARS_OUTBUFS(sc) + \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) REMOTE_SCALARS_INHANDLES(sc)+ \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) REMOTE_SCALARS_OUTHANDLES(sc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) (((attr & 0x07) << 29) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) ((method & 0x1f) << 24) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) ((in & 0xff) << 16) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) ((out & 0xff) << 8) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) ((oin & 0x0f) << 4) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) (oout & 0x0f))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) #define FASTRPC_SCALARS(method, in, out) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) #define FASTRPC_CREATE_PROCESS_NARGS 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) /* Remote Method id table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #define FASTRPC_RMID_INIT_ATTACH 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) #define FASTRPC_RMID_INIT_RELEASE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) #define FASTRPC_RMID_INIT_MMAP 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) #define FASTRPC_RMID_INIT_MUNMAP 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) #define FASTRPC_RMID_INIT_CREATE 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) #define FASTRPC_RMID_INIT_CREATE_ATTR 7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) #define FASTRPC_RMID_INIT_CREATE_STATIC 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) /* Protection Domain(PD) ids */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) #define AUDIO_PD (0) /* also GUEST_OS PD? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) #define USER_PD (1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) #define SENSORS_PD (2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) #define miscdev_to_cctx(d) container_of(d, struct fastrpc_channel_ctx, miscdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
/* Remote-processor names, indexed by the *_DOMAIN_ID defines above */
static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
						"sdsp", "cdsp"};
/* One physically contiguous memory region (address/size pair) */
struct fastrpc_phy_page {
	u64 addr;		/* physical address */
	u64 size;		/* size of contiguous region */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89)
/* Descriptor locating one argument buffer within a page list */
struct fastrpc_invoke_buf {
	u32 num;		/* number of contiguous regions */
	u32 pgidx;		/* index to start of contiguous region */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
/* One remote argument: address/length pair handed to the remote side */
struct fastrpc_remote_arg {
	u64 pv;			/* argument payload address */
	u64 len;		/* payload length in bytes */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99)
/*
 * mmap response body.
 * NOTE(review): presumably paired with FASTRPC_RMID_INIT_MMAP and carries
 * the remote address of the new mapping — confirm against the mmap path.
 */
struct fastrpc_mmap_rsp_msg {
	u64 vaddr;		/* remote virtual address of the mapping */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)
/*
 * mmap request body.
 * NOTE(review): presumably the argument block for FASTRPC_RMID_INIT_MMAP;
 * field order is wire layout — do not reorder.
 */
struct fastrpc_mmap_req_msg {
	s32 pgid;		/* process group id */
	u32 flags;		/* mapping flags */
	u64 vaddr;		/* requested virtual address (0 = any) — confirm */
	s32 num;		/* number of pages/regions that follow — confirm */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)
/*
 * munmap request body.
 * NOTE(review): presumably the argument block for FASTRPC_RMID_INIT_MUNMAP;
 * field order is wire layout — do not reorder.
 */
struct fastrpc_munmap_req_msg {
	s32 pgid;		/* process group id */
	u64 vaddr;		/* remote address to unmap */
	u64 size;		/* size of the mapping */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116)
/* Request descriptor for one remote invocation (embedded in the ctx) */
struct fastrpc_msg {
	int pid;		/* process group id */
	int tid;		/* thread id */
	u64 ctx;		/* invoke caller context */
	u32 handle;	/* handle to invoke */
	u32 sc;		/* scalars structure describing the data */
	u64 addr;		/* physical address */
	u64 size;		/* size of contiguous region */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126)
/* Completion record for an invocation; ctx matches fastrpc_msg::ctx */
struct fastrpc_invoke_rsp {
	u64 ctx;		/* invoke caller context */
	int retval;		/* invoke return value */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)
/*
 * Overlap bookkeeping for one buffer argument, filled in by
 * fastrpc_get_buff_overlaps().
 */
struct fastrpc_buf_overlap {
	u64 start;		/* buffer start (args[raix].ptr) */
	u64 end;		/* buffer end (start + length) */
	int raix;		/* original index into ctx->args, pre-sort */
	u64 mstart;		/* start of the portion not covered by earlier bufs */
	u64 mend;		/* end of that portion; 0 if fully covered */
	u64 offset;		/* offset of start into the covering range */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)
/* Driver-allocated coherent DMA buffer */
struct fastrpc_buf {
	struct fastrpc_user *fl;	/* owning user instance */
	struct dma_buf *dmabuf;		/* set when exported as a dma-buf */
	struct device *dev;		/* device memory was allocated on */
	void *virt;			/* kernel virtual address */
	u64 phys;			/* physical address; upper 32 bits may
					 * carry the session id (see
					 * fastrpc_buf_alloc()) */
	u64 size;			/* allocation size in bytes */
	/* Lock for dma buf attachments */
	struct mutex lock;
	struct list_head attachments;
	/* mmap support */
	struct list_head node; /* list of user requested mmaps */
	uintptr_t raddr;	/* remote address; 0 until mapped — confirm */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
/* Per-attachment state for an exported fastrpc_buf dma-buf */
struct fastrpc_dma_buf_attachment {
	struct device *dev;	/* importing device */
	struct sg_table sgt;	/* scatter list for this attachment */
	struct list_head node;	/* entry in fastrpc_buf::attachments */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161)
/* Mapping of a caller-supplied dma-buf fd; refcounted, looked up by fd */
struct fastrpc_map {
	struct list_head node;		/* entry in fastrpc_user::maps */
	struct fastrpc_user *fl;	/* owning user instance */
	int fd;				/* dma-buf fd this map was built from */
	struct dma_buf *buf;		/* dma-buf reference */
	struct sg_table *table;		/* sg table of the mapped attachment */
	struct dma_buf_attachment *attach;
	u64 phys;			/* device-visible address */
	u64 size;			/* mapped size */
	void *va;			/* kernel virtual address, if any */
	u64 len;			/* caller-requested length */
	struct kref refcount;		/* released via fastrpc_free_map() */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175)
/*
 * State for one in-flight remote invocation.  Refcounted; the final
 * fastrpc_context_put() runs fastrpc_context_free().
 */
struct fastrpc_invoke_ctx {
	int nscalars;		/* total argument count (bufs + handles) */
	int nbufs;		/* in + out buffer args (prefix of args[]) */
	int retval;		/* remote return value; -1 until completed */
	int pid;		/* caller pid */
	int tgid;		/* caller tgid (from fastrpc_user) */
	u32 sc;			/* packed scalars word for this call */
	u32 *crc;		/* optional user CRC list */
	u64 ctxid;		/* idr id << 4 (see fastrpc_context_free()) */
	u64 msg_sz;		/* size of the marshalled message payload */
	struct kref refcount;
	struct list_head node; /* list of ctxs */
	struct completion work;		/* invoke completion */
	struct work_struct put_work;	/* deferred fastrpc_context_put() */
	struct fastrpc_msg msg;		/* request header sent to remote */
	struct fastrpc_user *fl;	/* owning user instance */
	struct fastrpc_remote_arg *rpra;	/* marshalled remote args */
	struct fastrpc_map **maps;	/* one (possibly NULL) map per scalar */
	struct fastrpc_buf *buf;	/* metadata/payload buffer */
	struct fastrpc_invoke_args *args;	/* caller-provided args */
	struct fastrpc_buf_overlap *olaps;	/* buffer overlap info */
	struct fastrpc_channel_ctx *cctx;	/* channel ref held per ctx */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)
/* One session on a channel */
struct fastrpc_session_ctx {
	struct device *dev;	/* session device used for DMA allocations */
	int sid;		/* session id; folded into the upper 32 bits
				 * of buffer addresses (fastrpc_buf_alloc()) */
	bool used;		/* session currently assigned to a user */
	bool valid;		/* session successfully set up */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)
/* Per-domain (per remote processor) channel state; refcounted */
struct fastrpc_channel_ctx {
	int domain_id;		/* one of the *_DOMAIN_ID defines */
	int sesscount;		/* number of entries populated in session[] */
	struct rpmsg_device *rpdev;	/* rpmsg transport for this channel */
	struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
	spinlock_t lock;	/* protects ctx_idr (irq-safe) */
	struct idr ctx_idr;	/* in-flight invoke contexts, by id */
	struct list_head users;	/* fastrpc_user instances on this channel */
	struct miscdevice miscdev;
	struct kref refcount;	/* released via fastrpc_channel_ctx_free() */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218)
/* Per-open-file state */
struct fastrpc_user {
	struct list_head user;		/* entry in fastrpc_channel_ctx::users */
	struct list_head maps;		/* fastrpc_map list (fastrpc_map_find) */
	struct list_head pending;	/* in-flight invoke contexts */
	struct list_head mmaps;		/* user-requested mmap buffers */

	struct fastrpc_channel_ctx *cctx;	/* channel this user is on */
	struct fastrpc_session_ctx *sctx;	/* session assigned to user */
	struct fastrpc_buf *init_mem;		/* process init image memory */

	int tgid;	/* owning thread-group id */
	int pd;		/* protection domain (AUDIO_PD/USER_PD/SENSORS_PD) */
	/* Lock for lists */
	spinlock_t lock;
	/* lock for allocations; also serializes maps-list lookups */
	struct mutex mutex;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236)
/*
 * kref release callback for a fastrpc_map.
 *
 * Tears down the dma-buf state in reverse order of setup: unmap the
 * attachment, detach from the buffer, then drop the dma-buf reference.
 * A non-NULL table implies attach/buf were set up, so the whole trio
 * is released together.
 */
static void fastrpc_free_map(struct kref *ref)
{
	struct fastrpc_map *map;

	map = container_of(ref, struct fastrpc_map, refcount);

	if (map->table) {
		dma_buf_unmap_attachment(map->attach, map->table,
					 DMA_BIDIRECTIONAL);
		dma_buf_detach(map->buf, map->attach);
		dma_buf_put(map->buf);
	}

	kfree(map);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) static void fastrpc_map_put(struct fastrpc_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) if (map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) kref_put(&map->refcount, fastrpc_free_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) static void fastrpc_map_get(struct fastrpc_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) if (map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) kref_get(&map->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) struct fastrpc_map **ppmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) struct fastrpc_map *map = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) mutex_lock(&fl->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) list_for_each_entry(map, &fl->maps, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) if (map->fd == fd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) fastrpc_map_get(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) *ppmap = map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) mutex_unlock(&fl->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) mutex_unlock(&fl->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) static void fastrpc_buf_free(struct fastrpc_buf *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) dma_free_coherent(buf->dev, buf->size, buf->virt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) FASTRPC_PHYS(buf->phys));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) u64 size, struct fastrpc_buf **obuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) struct fastrpc_buf *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) buf = kzalloc(sizeof(*buf), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) INIT_LIST_HEAD(&buf->attachments);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) INIT_LIST_HEAD(&buf->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) mutex_init(&buf->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) buf->fl = fl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) buf->virt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) buf->phys = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) buf->size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) buf->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) buf->raddr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) if (!buf->virt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) mutex_destroy(&buf->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) if (fl->sctx && fl->sctx->sid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) buf->phys += ((u64)fl->sctx->sid << 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) *obuf = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) static void fastrpc_channel_ctx_free(struct kref *ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) struct fastrpc_channel_ctx *cctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) cctx = container_of(ref, struct fastrpc_channel_ctx, refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) kfree(cctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335)
/* Take a reference on a channel context */
static void fastrpc_channel_ctx_get(struct fastrpc_channel_ctx *cctx)
{
	kref_get(&cctx->refcount);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340)
/* Drop a channel context reference; last put frees the channel */
static void fastrpc_channel_ctx_put(struct fastrpc_channel_ctx *cctx)
{
	kref_put(&cctx->refcount, fastrpc_channel_ctx_free);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345)
/*
 * kref release callback for an invoke context.
 *
 * Drops the per-argument map references, frees the metadata buffer,
 * removes the context from the channel idr (ctxid stores the idr id
 * shifted left by 4, hence the >> 4), frees the context, and finally
 * drops the channel reference taken in fastrpc_context_alloc().  The
 * channel put must come last: cctx->lock and ctx_idr are used above.
 */
static void fastrpc_context_free(struct kref *ref)
{
	struct fastrpc_invoke_ctx *ctx;
	struct fastrpc_channel_ctx *cctx;
	unsigned long flags;
	int i;

	ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount);
	cctx = ctx->cctx;

	/* fastrpc_map_put() is NULL-safe; unused slots are NULL */
	for (i = 0; i < ctx->nscalars; i++)
		fastrpc_map_put(ctx->maps[i]);

	if (ctx->buf)
		fastrpc_buf_free(ctx->buf);

	spin_lock_irqsave(&cctx->lock, flags);
	idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
	spin_unlock_irqrestore(&cctx->lock, flags);

	kfree(ctx->maps);
	kfree(ctx->olaps);
	kfree(ctx);

	fastrpc_channel_ctx_put(cctx);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372)
/* Take a reference on an invoke context */
static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx)
{
	kref_get(&ctx->refcount);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377)
/* Drop an invoke context reference; last put runs fastrpc_context_free() */
static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx)
{
	kref_put(&ctx->refcount, fastrpc_context_free);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) static void fastrpc_context_put_wq(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) struct fastrpc_invoke_ctx *ctx =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) container_of(work, struct fastrpc_invoke_ctx, put_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) fastrpc_context_put(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) #define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) static int olaps_cmp(const void *a, const void *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) struct fastrpc_buf_overlap *pa = (struct fastrpc_buf_overlap *)a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) struct fastrpc_buf_overlap *pb = (struct fastrpc_buf_overlap *)b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) /* sort with lowest starting buffer first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) int st = CMP(pa->start, pb->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) /* sort with highest ending buffer first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) int ed = CMP(pb->end, pa->end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) return st == 0 ? ed : st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403)
/*
 * Compute overlap information for the buffer arguments of @ctx.
 *
 * Records each buffer's [start, end) span and original index, sorts the
 * spans (ascending start, then descending end — see olaps_cmp()), and
 * then walks them tracking max_end, the highest end seen so far:
 *
 *  - a span starting at or beyond max_end is disjoint from everything
 *    before it: mstart/mend cover the whole span, offset is 0;
 *  - a span starting below max_end overlaps earlier coverage: only the
 *    tail [max_end, end) is new (offset = max_end - start); if the span
 *    ends at or before max_end it is fully contained and mstart/mend
 *    are zeroed.
 *
 * Only the first nbufs entries of args/olaps (the buffer args) are used.
 */
static void fastrpc_get_buff_overlaps(struct fastrpc_invoke_ctx *ctx)
{
	u64 max_end = 0;
	int i;

	for (i = 0; i < ctx->nbufs; ++i) {
		ctx->olaps[i].start = ctx->args[i].ptr;
		ctx->olaps[i].end = ctx->olaps[i].start + ctx->args[i].length;
		ctx->olaps[i].raix = i;
	}

	sort(ctx->olaps, ctx->nbufs, sizeof(*ctx->olaps), olaps_cmp, NULL);

	for (i = 0; i < ctx->nbufs; ++i) {
		/* Falling inside previous range */
		if (ctx->olaps[i].start < max_end) {
			ctx->olaps[i].mstart = max_end;
			ctx->olaps[i].mend = ctx->olaps[i].end;
			ctx->olaps[i].offset = max_end - ctx->olaps[i].start;

			if (ctx->olaps[i].end > max_end) {
				max_end = ctx->olaps[i].end;
			} else {
				/* fully contained in earlier coverage */
				ctx->olaps[i].mend = 0;
				ctx->olaps[i].mstart = 0;
			}

		} else {
			ctx->olaps[i].mend = ctx->olaps[i].end;
			ctx->olaps[i].mstart = ctx->olaps[i].start;
			ctx->olaps[i].offset = 0;
			max_end = ctx->olaps[i].end;
		}
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) struct fastrpc_user *user, u32 kernel, u32 sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) struct fastrpc_invoke_args *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) struct fastrpc_channel_ctx *cctx = user->cctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) struct fastrpc_invoke_ctx *ctx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) if (!ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) INIT_LIST_HEAD(&ctx->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) ctx->fl = user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) ctx->nscalars = REMOTE_SCALARS_LENGTH(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) REMOTE_SCALARS_OUTBUFS(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) if (ctx->nscalars) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) ctx->maps = kcalloc(ctx->nscalars,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) sizeof(*ctx->maps), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) if (!ctx->maps) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) kfree(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) ctx->olaps = kcalloc(ctx->nscalars,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) sizeof(*ctx->olaps), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) if (!ctx->olaps) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) kfree(ctx->maps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) kfree(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) ctx->args = args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) fastrpc_get_buff_overlaps(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) /* Released in fastrpc_context_put() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) fastrpc_channel_ctx_get(cctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) ctx->sc = sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) ctx->retval = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) ctx->pid = current->pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) ctx->tgid = user->tgid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) ctx->cctx = cctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) init_completion(&ctx->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) INIT_WORK(&ctx->put_work, fastrpc_context_put_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) spin_lock(&user->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) list_add_tail(&ctx->node, &user->pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) spin_unlock(&user->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) spin_lock_irqsave(&cctx->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) FASTRPC_CTX_MAX, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) spin_unlock_irqrestore(&cctx->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) goto err_idr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) ctx->ctxid = ret << 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) spin_unlock_irqrestore(&cctx->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) kref_init(&ctx->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) return ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) err_idr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) spin_lock(&user->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) list_del(&ctx->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) spin_unlock(&user->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) fastrpc_channel_ctx_put(cctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) kfree(ctx->maps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) kfree(ctx->olaps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) kfree(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) static struct sg_table *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) enum dma_data_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) struct fastrpc_dma_buf_attachment *a = attachment->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) struct sg_table *table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) table = &a->sgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) ret = dma_map_sgtable(attachment->dev, table, dir, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) table = ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) return table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) struct sg_table *table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) enum dma_data_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) dma_unmap_sgtable(attach->dev, table, dir, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) static void fastrpc_release(struct dma_buf *dmabuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) struct fastrpc_buf *buffer = dmabuf->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) fastrpc_buf_free(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) struct dma_buf_attachment *attachment)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) struct fastrpc_dma_buf_attachment *a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) struct fastrpc_buf *buffer = dmabuf->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) a = kzalloc(sizeof(*a), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) if (!a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) FASTRPC_PHYS(buffer->phys), buffer->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) kfree(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) a->dev = attachment->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) INIT_LIST_HEAD(&a->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) attachment->priv = a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) mutex_lock(&buffer->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) list_add(&a->node, &buffer->attachments);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) mutex_unlock(&buffer->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) static void fastrpc_dma_buf_detatch(struct dma_buf *dmabuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) struct dma_buf_attachment *attachment)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) struct fastrpc_dma_buf_attachment *a = attachment->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) struct fastrpc_buf *buffer = dmabuf->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) mutex_lock(&buffer->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) list_del(&a->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) mutex_unlock(&buffer->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) sg_free_table(&a->sgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) kfree(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) static void *fastrpc_vmap(struct dma_buf *dmabuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) struct fastrpc_buf *buf = dmabuf->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) return buf->virt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) static int fastrpc_mmap(struct dma_buf *dmabuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) struct fastrpc_buf *buf = dmabuf->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) size_t size = vma->vm_end - vma->vm_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) return dma_mmap_coherent(buf->dev, vma, buf->virt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) FASTRPC_PHYS(buf->phys), size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) static const struct dma_buf_ops fastrpc_dma_buf_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) .attach = fastrpc_dma_buf_attach,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) .detach = fastrpc_dma_buf_detatch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) .map_dma_buf = fastrpc_map_dma_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) .unmap_dma_buf = fastrpc_unmap_dma_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) .mmap = fastrpc_mmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) .vmap = fastrpc_vmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) .release = fastrpc_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) u64 len, struct fastrpc_map **ppmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) struct fastrpc_session_ctx *sess = fl->sctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) struct fastrpc_map *map = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) if (!fastrpc_map_find(fl, fd, ppmap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) map = kzalloc(sizeof(*map), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) if (!map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) INIT_LIST_HEAD(&map->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) map->fl = fl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) map->fd = fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) map->buf = dma_buf_get(fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) if (IS_ERR(map->buf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) err = PTR_ERR(map->buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) goto get_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) map->attach = dma_buf_attach(map->buf, sess->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) if (IS_ERR(map->attach)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) dev_err(sess->dev, "Failed to attach dmabuf\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) err = PTR_ERR(map->attach);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) goto attach_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) map->table = dma_buf_map_attachment(map->attach, DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) if (IS_ERR(map->table)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) err = PTR_ERR(map->table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) goto map_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) map->phys = sg_dma_address(map->table->sgl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) map->phys += ((u64)fl->sctx->sid << 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) map->size = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) map->va = sg_virt(map->table->sgl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) map->len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) kref_init(&map->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) spin_lock(&fl->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) list_add_tail(&map->node, &fl->maps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) spin_unlock(&fl->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) *ppmap = map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) map_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) dma_buf_detach(map->buf, map->attach);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) attach_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) dma_buf_put(map->buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) get_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) kfree(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) * Fastrpc payload buffer with metadata looks like:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) * >>>>>> START of METADATA <<<<<<<<<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) * +---------------------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) * | Arguments |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) * | type:(struct fastrpc_remote_arg)|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) * | (0 - N) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) * +---------------------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) * | Invoke Buffer list |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) * | type:(struct fastrpc_invoke_buf)|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) * | (0 - N) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) * +---------------------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) * | Page info list |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) * | type:(struct fastrpc_phy_page) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) * | (0 - N) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) * +---------------------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) * | Optional info |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) * |(can be specific to SoC/Firmware)|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) * +---------------------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) * >>>>>>>> END of METADATA <<<<<<<<<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) * +---------------------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) * | Inline ARGS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) * | (0-N) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) * +---------------------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) int size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) size = (sizeof(struct fastrpc_remote_arg) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) sizeof(struct fastrpc_invoke_buf) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) sizeof(u64) * FASTRPC_MAX_FDLIST +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) sizeof(u32) * FASTRPC_MAX_CRCLIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) u64 size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) int oix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) size = ALIGN(metalen, FASTRPC_ALIGN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) for (oix = 0; oix < ctx->nbufs; oix++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) int i = ctx->olaps[oix].raix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) if (ctx->olaps[oix].offset == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) size = ALIGN(size, FASTRPC_ALIGN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) size += (ctx->olaps[oix].mend - ctx->olaps[oix].mstart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) struct device *dev = ctx->fl->sctx->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) for (i = 0; i < ctx->nscalars; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) /* Make sure reserved field is set to 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) if (ctx->args[i].reserved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) ctx->args[i].length == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) ctx->args[i].length, &ctx->maps[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) dev_err(dev, "Error Creating map %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) struct device *dev = ctx->fl->sctx->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) struct fastrpc_remote_arg *rpra;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) struct fastrpc_invoke_buf *list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) struct fastrpc_phy_page *pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) int inbufs, i, oix, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) u64 len, rlen, pkt_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) u64 pg_start, pg_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) uintptr_t args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) int metalen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) metalen = fastrpc_get_meta_size(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) pkt_size = fastrpc_get_payload_size(ctx, metalen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) err = fastrpc_create_maps(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) ctx->msg_sz = pkt_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) rpra = ctx->buf->virt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) list = ctx->buf->virt + ctx->nscalars * sizeof(*rpra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) pages = ctx->buf->virt + ctx->nscalars * (sizeof(*list) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) sizeof(*rpra));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) args = (uintptr_t)ctx->buf->virt + metalen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) rlen = pkt_size - metalen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) ctx->rpra = rpra;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) for (oix = 0; oix < ctx->nbufs; ++oix) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) int mlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) i = ctx->olaps[oix].raix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) len = ctx->args[i].length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) rpra[i].pv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) rpra[i].len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) list[i].num = len ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) list[i].pgidx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) if (!len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) if (ctx->maps[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) struct vm_area_struct *vma = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) rpra[i].pv = (u64) ctx->args[i].ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) pages[i].addr = ctx->maps[i]->phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) mmap_read_lock(current->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) vma = find_vma(current->mm, ctx->args[i].ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) if (vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) pages[i].addr += ctx->args[i].ptr -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) vma->vm_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) mmap_read_unlock(current->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (ctx->olaps[oix].offset == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) args = ALIGN(args, FASTRPC_ALIGN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) mlen = ctx->olaps[oix].mend - ctx->olaps[oix].mstart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) if (rlen < mlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) rpra[i].pv = args - ctx->olaps[oix].offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) pages[i].addr = ctx->buf->phys -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) ctx->olaps[oix].offset +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) (pkt_size - rlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) pages[i].addr = pages[i].addr & PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) pg_start = (args & PAGE_MASK) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) args = args + mlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) rlen -= mlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (i < inbufs && !ctx->maps[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) void *dst = (void *)(uintptr_t)rpra[i].pv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) void *src = (void *)(uintptr_t)ctx->args[i].ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (!kernel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (copy_from_user(dst, (void __user *)src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) memcpy(dst, src, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) for (i = ctx->nbufs; i < ctx->nscalars; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) rpra[i].pv = (u64) ctx->args[i].ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) rpra[i].len = ctx->args[i].length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) list[i].num = ctx->args[i].length ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) list[i].pgidx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) pages[i].addr = ctx->maps[i]->phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) pages[i].size = ctx->maps[i]->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) dev_err(dev, "Error: get invoke args failed:%d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) u32 kernel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) struct fastrpc_remote_arg *rpra = ctx->rpra;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) int i, inbufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) for (i = inbufs; i < ctx->nbufs; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) void *src = (void *)(uintptr_t)rpra[i].pv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) u64 len = rpra[i].len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (!kernel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) if (copy_to_user((void __user *)dst, src, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) memcpy(dst, src, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) struct fastrpc_invoke_ctx *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) u32 kernel, uint32_t handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) struct fastrpc_channel_ctx *cctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) struct fastrpc_user *fl = ctx->fl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) struct fastrpc_msg *msg = &ctx->msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) cctx = fl->cctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) msg->pid = fl->tgid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) msg->tid = current->pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) if (kernel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) msg->pid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) msg->ctx = ctx->ctxid | fl->pd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) msg->handle = handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) msg->sc = ctx->sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) msg->addr = ctx->buf ? ctx->buf->phys : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) msg->size = roundup(ctx->msg_sz, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) fastrpc_context_get(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) ret = rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) fastrpc_context_put(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) u32 handle, u32 sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) struct fastrpc_invoke_args *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) struct fastrpc_invoke_ctx *ctx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) if (!fl->sctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (!fl->cctx->rpdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) return -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) if (handle == FASTRPC_INIT_HANDLE && !kernel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) dev_warn_ratelimited(fl->sctx->dev, "user app trying to send a kernel RPC message (%d)\n", handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) ctx = fastrpc_context_alloc(fl, kernel, sc, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (IS_ERR(ctx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) return PTR_ERR(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) if (ctx->nscalars) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) err = fastrpc_get_args(kernel, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) /* make sure that all CPU memory writes are seen by DSP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) dma_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) /* Send invoke buffer to remote dsp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) if (kernel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if (!wait_for_completion_timeout(&ctx->work, 10 * HZ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) err = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) err = wait_for_completion_interruptible(&ctx->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) /* Check the response from remote dsp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) err = ctx->retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if (ctx->nscalars) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) /* make sure that all memory writes by DSP are seen by CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) dma_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) /* populate all the output buffers with results */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) err = fastrpc_put_args(ctx, kernel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) if (err != -ERESTARTSYS && err != -ETIMEDOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) /* We are done with this compute context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) spin_lock(&fl->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) list_del(&ctx->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) spin_unlock(&fl->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) fastrpc_context_put(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) static int fastrpc_init_create_process(struct fastrpc_user *fl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) char __user *argp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) struct fastrpc_init_create init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) struct fastrpc_invoke_args *args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) struct fastrpc_phy_page pages[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) struct fastrpc_map *map = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) struct fastrpc_buf *imem = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) int memlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) int pgid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) u32 namelen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) u32 filelen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) u32 pageslen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) u32 attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) u32 siglen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) } inbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) u32 sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (!args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) if (copy_from_user(&init, argp, sizeof(init))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (init.filelen > INIT_FILELEN_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) inbuf.pgid = fl->tgid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) inbuf.namelen = strlen(current->comm) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) inbuf.filelen = init.filelen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) inbuf.pageslen = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) inbuf.attrs = init.attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) inbuf.siglen = init.siglen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) fl->pd = USER_PD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) if (init.filelen && init.filefd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) err = fastrpc_map_create(fl, init.filefd, init.filelen, &map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 1024 * 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) &imem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) goto err_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) fl->init_mem = imem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) args[0].ptr = (u64)(uintptr_t)&inbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) args[0].length = sizeof(inbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) args[0].fd = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) args[1].ptr = (u64)(uintptr_t)current->comm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) args[1].length = inbuf.namelen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) args[1].fd = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) args[2].ptr = (u64) init.file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) args[2].length = inbuf.filelen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) args[2].fd = init.filefd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) pages[0].addr = imem->phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) pages[0].size = imem->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) args[3].ptr = (u64)(uintptr_t) pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) args[3].length = 1 * sizeof(*pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) args[3].fd = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) args[4].ptr = (u64)(uintptr_t)&inbuf.attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) args[4].length = sizeof(inbuf.attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) args[4].fd = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) args[5].ptr = (u64)(uintptr_t) &inbuf.siglen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) args[5].length = sizeof(inbuf.siglen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) args[5].fd = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) if (init.attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) sc, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) goto err_invoke;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) kfree(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) err_invoke:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) fl->init_mem = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) fastrpc_buf_free(imem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) err_alloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) if (map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) spin_lock(&fl->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) list_del(&map->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) spin_unlock(&fl->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) fastrpc_map_put(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) kfree(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) static struct fastrpc_session_ctx *fastrpc_session_alloc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) struct fastrpc_channel_ctx *cctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) struct fastrpc_session_ctx *session = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) spin_lock_irqsave(&cctx->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) for (i = 0; i < cctx->sesscount; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) if (!cctx->session[i].used && cctx->session[i].valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) cctx->session[i].used = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) session = &cctx->session[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) spin_unlock_irqrestore(&cctx->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) return session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) struct fastrpc_session_ctx *session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) spin_lock_irqsave(&cctx->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) session->used = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) spin_unlock_irqrestore(&cctx->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) struct fastrpc_invoke_args args[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) int tgid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) u32 sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) tgid = fl->tgid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) args[0].ptr = (u64)(uintptr_t) &tgid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) args[0].length = sizeof(tgid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) args[0].fd = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) args[0].reserved = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) sc, &args[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) static int fastrpc_device_release(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) struct fastrpc_channel_ctx *cctx = fl->cctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) struct fastrpc_invoke_ctx *ctx, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) struct fastrpc_map *map, *m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) struct fastrpc_buf *buf, *b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) fastrpc_release_current_dsp_process(fl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) spin_lock_irqsave(&cctx->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) list_del(&fl->user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) spin_unlock_irqrestore(&cctx->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) if (fl->init_mem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) fastrpc_buf_free(fl->init_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) list_for_each_entry_safe(ctx, n, &fl->pending, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) list_del(&ctx->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) fastrpc_context_put(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) list_for_each_entry_safe(map, m, &fl->maps, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) list_del(&map->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) fastrpc_map_put(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) list_del(&buf->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) fastrpc_buf_free(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) fastrpc_session_free(cctx, fl->sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) fastrpc_channel_ctx_put(cctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) mutex_destroy(&fl->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) kfree(fl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) file->private_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) static int fastrpc_device_open(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) struct fastrpc_channel_ctx *cctx = miscdev_to_cctx(filp->private_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) struct fastrpc_user *fl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) fl = kzalloc(sizeof(*fl), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) if (!fl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) /* Released in fastrpc_device_release() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) fastrpc_channel_ctx_get(cctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) filp->private_data = fl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) spin_lock_init(&fl->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) mutex_init(&fl->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) INIT_LIST_HEAD(&fl->pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) INIT_LIST_HEAD(&fl->maps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) INIT_LIST_HEAD(&fl->mmaps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) INIT_LIST_HEAD(&fl->user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) fl->tgid = current->tgid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) fl->cctx = cctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) fl->sctx = fastrpc_session_alloc(cctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) if (!fl->sctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) dev_err(&cctx->rpdev->dev, "No session available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) mutex_destroy(&fl->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) kfree(fl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) spin_lock_irqsave(&cctx->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) list_add_tail(&fl->user, &cctx->users);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) spin_unlock_irqrestore(&cctx->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) struct fastrpc_alloc_dma_buf bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) struct fastrpc_buf *buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) if (copy_from_user(&bp, argp, sizeof(bp)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) exp_info.ops = &fastrpc_dma_buf_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) exp_info.size = bp.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) exp_info.flags = O_RDWR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) exp_info.priv = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) buf->dmabuf = dma_buf_export(&exp_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) if (IS_ERR(buf->dmabuf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) err = PTR_ERR(buf->dmabuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) fastrpc_buf_free(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) if (bp.fd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) dma_buf_put(buf->dmabuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) if (copy_to_user(argp, &bp, sizeof(bp))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) * The usercopy failed, but we can't do much about it, as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) * dma_buf_fd() already called fd_install() and made the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) * file descriptor accessible for the current process. It
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) * might already be closed and dmabuf no longer valid when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) * we reach this point. Therefore "leak" the fd and rely on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) * the process exit path to do any required cleanup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) struct fastrpc_invoke_args args[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) int tgid = fl->tgid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) u32 sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) args[0].ptr = (u64)(uintptr_t) &tgid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) args[0].length = sizeof(tgid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) args[0].fd = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) args[0].reserved = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) fl->pd = pd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) sc, &args[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) struct fastrpc_invoke_args *args = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) struct fastrpc_invoke inv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) u32 nscalars;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) if (copy_from_user(&inv, argp, sizeof(inv)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) /* nscalars is truncated here to max supported value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) nscalars = REMOTE_SCALARS_LENGTH(inv.sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) if (nscalars) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) if (!args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) if (copy_from_user(args, (void __user *)(uintptr_t)inv.args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) nscalars * sizeof(*args))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) kfree(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) kfree(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) static int fastrpc_req_munmap_impl(struct fastrpc_user *fl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) struct fastrpc_req_munmap *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) struct fastrpc_buf *buf, *b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) struct fastrpc_munmap_req_msg req_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) struct device *dev = fl->sctx->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) u32 sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) spin_lock(&fl->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) if ((buf->raddr == req->vaddrout) && (buf->size == req->size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) spin_unlock(&fl->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) if (!buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) dev_err(dev, "mmap not in list\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) req_msg.pgid = fl->tgid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) req_msg.size = buf->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) req_msg.vaddr = buf->raddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) args[0].ptr = (u64) (uintptr_t) &req_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) args[0].length = sizeof(req_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MUNMAP, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) &args[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) dev_dbg(dev, "unmmap\tpt 0x%09lx OK\n", buf->raddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) spin_lock(&fl->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) list_del(&buf->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) spin_unlock(&fl->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) fastrpc_buf_free(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) dev_err(dev, "unmmap\tpt 0x%09lx ERROR\n", buf->raddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) struct fastrpc_req_munmap req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) if (copy_from_user(&req, argp, sizeof(req)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) return fastrpc_req_munmap_impl(fl, &req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) struct fastrpc_invoke_args args[3] = { [0 ... 2] = { 0 } };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) struct fastrpc_buf *buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) struct fastrpc_mmap_req_msg req_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) struct fastrpc_mmap_rsp_msg rsp_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) struct fastrpc_req_munmap req_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) struct fastrpc_phy_page pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) struct fastrpc_req_mmap req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) struct device *dev = fl->sctx->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) u32 sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) if (copy_from_user(&req, argp, sizeof(req)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) if (req.flags != ADSP_MMAP_ADD_PAGES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) dev_err(dev, "flag not supported 0x%x\n", req.flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) if (req.vaddrin) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) dev_err(dev, "adding user allocated pages is not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) err = fastrpc_buf_alloc(fl, fl->sctx->dev, req.size, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) dev_err(dev, "failed to allocate buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) req_msg.pgid = fl->tgid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) req_msg.flags = req.flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) req_msg.vaddr = req.vaddrin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) req_msg.num = sizeof(pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) args[0].ptr = (u64) (uintptr_t) &req_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) args[0].length = sizeof(req_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) pages.addr = buf->phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) pages.size = buf->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) args[1].ptr = (u64) (uintptr_t) &pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) args[1].length = sizeof(pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) args[2].ptr = (u64) (uintptr_t) &rsp_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) args[2].length = sizeof(rsp_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MMAP, 2, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) &args[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) goto err_invoke;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) /* update the buffer to be able to deallocate the memory on the DSP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) buf->raddr = (uintptr_t) rsp_msg.vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) /* let the client know the address to use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) req.vaddrout = rsp_msg.vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) spin_lock(&fl->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) list_add_tail(&buf->node, &fl->mmaps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) spin_unlock(&fl->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) /* unmap the memory and release the buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) req_unmap.vaddrout = buf->raddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) req_unmap.size = buf->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) fastrpc_req_munmap_impl(fl, &req_unmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) dev_dbg(dev, "mmap\t\tpt 0x%09lx OK [len 0x%08llx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) buf->raddr, buf->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) err_invoke:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) fastrpc_buf_free(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) char __user *argp = (char __user *)arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) case FASTRPC_IOCTL_INVOKE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) err = fastrpc_invoke(fl, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) case FASTRPC_IOCTL_INIT_ATTACH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) err = fastrpc_init_attach(fl, AUDIO_PD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) case FASTRPC_IOCTL_INIT_ATTACH_SNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) err = fastrpc_init_attach(fl, SENSORS_PD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) case FASTRPC_IOCTL_INIT_CREATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) err = fastrpc_init_create_process(fl, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) err = fastrpc_dmabuf_alloc(fl, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) case FASTRPC_IOCTL_MMAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) err = fastrpc_req_mmap(fl, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) case FASTRPC_IOCTL_MUNMAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) err = fastrpc_req_munmap(fl, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) err = -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) static const struct file_operations fastrpc_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) .open = fastrpc_device_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) .release = fastrpc_device_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) .unlocked_ioctl = fastrpc_device_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) .compat_ioctl = fastrpc_device_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) static int fastrpc_cb_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) struct fastrpc_channel_ctx *cctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) struct fastrpc_session_ctx *sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) int i, sessions = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) cctx = dev_get_drvdata(dev->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) if (!cctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) spin_lock_irqsave(&cctx->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) sess = &cctx->session[cctx->sesscount];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) sess->used = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) sess->valid = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) sess->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) dev_set_drvdata(dev, sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) if (of_property_read_u32(dev->of_node, "reg", &sess->sid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) dev_info(dev, "FastRPC Session ID not specified in DT\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) if (sessions > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) struct fastrpc_session_ctx *dup_sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) for (i = 1; i < sessions; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) if (cctx->sesscount++ >= FASTRPC_MAX_SESSIONS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) dup_sess = &cctx->session[cctx->sesscount];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) memcpy(dup_sess, sess, sizeof(*dup_sess));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) cctx->sesscount++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) spin_unlock_irqrestore(&cctx->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) rc = dma_set_mask(dev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) dev_err(dev, "32-bit DMA enable failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) static int fastrpc_cb_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) spin_lock_irqsave(&cctx->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) if (cctx->session[i].sid == sess->sid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) cctx->session[i].valid = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) cctx->sesscount--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) spin_unlock_irqrestore(&cctx->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) static const struct of_device_id fastrpc_match_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) { .compatible = "qcom,fastrpc-compute-cb", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) static struct platform_driver fastrpc_cb_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) .probe = fastrpc_cb_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) .remove = fastrpc_cb_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) .name = "qcom,fastrpc-cb",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) .of_match_table = fastrpc_match_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) .suppress_bind_attrs = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) struct device *rdev = &rpdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) struct fastrpc_channel_ctx *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) int i, err, domain_id = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) const char *domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) err = of_property_read_string(rdev->of_node, "label", &domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) dev_info(rdev, "FastRPC Domain not specified in DT\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) for (i = 0; i <= CDSP_DOMAIN_ID; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) if (!strcmp(domains[i], domain)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) domain_id = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) if (domain_id < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) dev_info(rdev, "FastRPC Invalid Domain ID %d\n", domain_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) data = kzalloc(sizeof(*data), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) data->miscdev.minor = MISC_DYNAMIC_MINOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) data->miscdev.name = devm_kasprintf(rdev, GFP_KERNEL, "fastrpc-%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) domains[domain_id]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) data->miscdev.fops = &fastrpc_fops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) err = misc_register(&data->miscdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) kfree(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) kref_init(&data->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) dev_set_drvdata(&rpdev->dev, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) INIT_LIST_HEAD(&data->users);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) spin_lock_init(&data->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) idr_init(&data->ctx_idr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) data->domain_id = domain_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) data->rpdev = rpdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) return of_platform_populate(rdev->of_node, NULL, NULL, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) static void fastrpc_notify_users(struct fastrpc_user *user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) struct fastrpc_invoke_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) spin_lock(&user->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) list_for_each_entry(ctx, &user->pending, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) complete(&ctx->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) spin_unlock(&user->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) struct fastrpc_user *user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) spin_lock_irqsave(&cctx->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) list_for_each_entry(user, &cctx->users, user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) fastrpc_notify_users(user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) spin_unlock_irqrestore(&cctx->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) misc_deregister(&cctx->miscdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) of_platform_depopulate(&rpdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) cctx->rpdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) fastrpc_channel_ctx_put(cctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) int len, void *priv, u32 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) struct fastrpc_invoke_rsp *rsp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) struct fastrpc_invoke_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) unsigned long ctxid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) if (len < sizeof(*rsp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) spin_lock_irqsave(&cctx->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) ctx = idr_find(&cctx->ctx_idr, ctxid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) spin_unlock_irqrestore(&cctx->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) if (!ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) dev_err(&rpdev->dev, "No context ID matches response\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) ctx->retval = rsp->retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) complete(&ctx->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) * The DMA buffer associated with the context cannot be freed in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) * interrupt context so schedule it through a worker thread to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) * avoid a kernel BUG.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) schedule_work(&ctx->put_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) static const struct of_device_id fastrpc_rpmsg_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) { .compatible = "qcom,fastrpc" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) MODULE_DEVICE_TABLE(of, fastrpc_rpmsg_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) static struct rpmsg_driver fastrpc_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) .probe = fastrpc_rpmsg_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) .remove = fastrpc_rpmsg_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) .callback = fastrpc_rpmsg_callback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) .drv = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) .name = "qcom,fastrpc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) .of_match_table = fastrpc_rpmsg_of_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) static int fastrpc_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) ret = platform_driver_register(&fastrpc_cb_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) pr_err("fastrpc: failed to register cb driver\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) ret = register_rpmsg_driver(&fastrpc_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) pr_err("fastrpc: failed to register rpmsg driver\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) platform_driver_unregister(&fastrpc_cb_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) module_init(fastrpc_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) static void fastrpc_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) platform_driver_unregister(&fastrpc_cb_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) unregister_rpmsg_driver(&fastrpc_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) module_exit(fastrpc_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) MODULE_LICENSE("GPL v2");