^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (c) 2015, Linaro Limited
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #include <linux/arm-smccc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/tee_drv.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include "optee_private.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include "optee_smc.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17)
/*
 * One entry in the call queue, created on the caller's stack for each
 * attempted call into secure world (see optee_cq_wait_init()).
 */
struct optee_call_waiter {
	struct list_head list_node;	/* entry in optee_call_queue waiters list */
	struct completion c;		/* completed when a secure thread frees up */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) static void optee_cq_wait_init(struct optee_call_queue *cq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) struct optee_call_waiter *w)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * We're preparing to make a call to secure world. In case we can't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) * allocate a thread in secure world we'll end up waiting in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * optee_cq_wait_for_completion().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * Normally if there's no contention in secure world the call will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * complete and we can cleanup directly with optee_cq_wait_final().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) mutex_lock(&cq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) * We add ourselves to the queue, but we don't wait. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) * guarantees that we don't lose a completion if secure world
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) * returns busy and another thread just exited and try to complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) * someone.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) init_completion(&w->c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) list_add_tail(&w->list_node, &cq->waiters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) mutex_unlock(&cq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
/*
 * Sleep until some other caller leaves secure world and completes us,
 * then requeue ourselves for another attempt. Called when secure world
 * reported it is out of threads.
 */
static void optee_cq_wait_for_completion(struct optee_call_queue *cq,
					 struct optee_call_waiter *w)
{
	wait_for_completion(&w->c);

	mutex_lock(&cq->mutex);

	/* Move to end of list to get out of the way for other waiters */
	list_del(&w->list_node);
	/* Re-arm so the next completion targeting us is not a no-op */
	reinit_completion(&w->c);
	list_add_tail(&w->list_node, &cq->waiters);

	mutex_unlock(&cq->mutex);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) static void optee_cq_complete_one(struct optee_call_queue *cq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) struct optee_call_waiter *w;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) list_for_each_entry(w, &cq->waiters, list_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) if (!completion_done(&w->c)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) complete(&w->c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
/*
 * Leave the call queue after a finished call to secure world and pass the
 * now-free secure-world thread on to a waiter, if any.
 */
static void optee_cq_wait_final(struct optee_call_queue *cq,
				struct optee_call_waiter *w)
{
	/*
	 * We're done with the call to secure world. The thread in secure
	 * world that was used for this call is now available for some
	 * other task to use.
	 */
	mutex_lock(&cq->mutex);

	/* Get out of the list */
	list_del(&w->list_node);

	/* Wake up one eventual waiting task */
	optee_cq_complete_one(cq);

	/*
	 * If we're completed we've got a completion from another task that
	 * was just done with its call to secure world. Since yet another
	 * thread now is available in secure world wake up another eventual
	 * waiting task.
	 */
	if (completion_done(&w->c))
		optee_cq_complete_one(cq);

	mutex_unlock(&cq->mutex);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) /* Requires the filpstate mutex to be held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) static struct optee_session *find_session(struct optee_context_data *ctxdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) u32 session_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) struct optee_session *sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) list_for_each_entry(sess, &ctxdata->sess_list, list_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) if (sess->session_id == session_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) return sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) * optee_do_call_with_arg() - Do an SMC to OP-TEE in secure world
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) * @ctx: calling context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) * @parg: physical address of message to pass to secure world
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) * Does and SMC to OP-TEE in secure world and handles eventual resulting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) * Remote Procedure Calls (RPC) from OP-TEE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) * Returns return code from secure world, 0 is OK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) struct optee *optee = tee_get_drvdata(ctx->teedev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) struct optee_call_waiter w;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) struct optee_rpc_param param = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) struct optee_call_ctx call_ctx = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) u32 ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) param.a0 = OPTEE_SMC_CALL_WITH_ARG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) reg_pair_from_64(¶m.a1, ¶m.a2, parg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) /* Initialize waiter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) optee_cq_wait_init(&optee->call_queue, &w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) struct arm_smccc_res res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) optee->invoke_fn(param.a0, param.a1, param.a2, param.a3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) param.a4, param.a5, param.a6, param.a7,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) * Out of threads in secure world, wait for a thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) * become available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) optee_cq_wait_for_completion(&optee->call_queue, &w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) if (need_resched())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) param.a0 = res.a0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) param.a1 = res.a1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) param.a2 = res.a2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) param.a3 = res.a3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) optee_handle_rpc(ctx, ¶m, &call_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) ret = res.a0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) optee_rpc_finalize_call(&call_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) * We're done with our thread in secure world, if there's any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) * thread waiters wake up one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) optee_cq_wait_final(&optee->call_queue, &w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) struct optee_msg_arg **msg_arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) phys_addr_t *msg_parg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) struct tee_shm *shm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) struct optee_msg_arg *ma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) TEE_SHM_MAPPED | TEE_SHM_PRIV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) if (IS_ERR(shm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) return shm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) ma = tee_shm_get_va(shm, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) if (IS_ERR(ma)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) rc = PTR_ERR(ma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) rc = tee_shm_get_pa(shm, 0, msg_parg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) memset(ma, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) ma->num_params = num_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) *msg_arg = ma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) tee_shm_free(shm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) return ERR_PTR(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) return shm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209)
/*
 * Open a session towards a Trusted Application identified by arg->uuid.
 * On success the new session is recorded in the context's session list
 * and its id returned in arg->session. Communication failures are
 * reported through arg->ret/arg->ret_origin rather than the return code.
 */
int optee_open_session(struct tee_context *ctx,
		       struct tee_ioctl_open_session_arg *arg,
		       struct tee_param *param)
{
	struct optee_context_data *ctxdata = ctx->data;
	int rc;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess = NULL;
	uuid_t client_uuid;

	/* +2 for the meta parameters added below */
	shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
	msg_arg->cancel_id = arg->cancel_id;

	/*
	 * Initialize and add the meta parameters needed when opening a
	 * session.
	 */
	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
				  OPTEE_MSG_ATTR_META;
	msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
				  OPTEE_MSG_ATTR_META;
	/* Meta param 0: UUID of the TA to open a session to */
	memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
	/* Meta param 1: login method, plus the client UUID exported below */
	msg_arg->params[1].u.value.c = arg->clnt_login;

	rc = tee_session_calc_client_uuid(&client_uuid, arg->clnt_login,
					  arg->clnt_uuid);
	if (rc)
		goto out;
	export_uuid(msg_arg->params[1].u.octets, &client_uuid);

	/* Normal parameters start after the two meta parameters */
	rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
	if (rc)
		goto out;

	/* Allocate the session record before calling into secure world */
	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		rc = -ENOMEM;
		goto out;
	}

	if (optee_do_call_with_arg(ctx, msg_parg)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	if (msg_arg->ret == TEEC_SUCCESS) {
		/* A new session has been created, add it to the list. */
		sess->session_id = msg_arg->session;
		mutex_lock(&ctxdata->mutex);
		list_add(&sess->list_node, &ctxdata->sess_list);
		mutex_unlock(&ctxdata->mutex);
	} else {
		kfree(sess);
	}

	if (optee_from_msg_param(param, arg->num_params, msg_arg->params + 2)) {
		arg->ret = TEEC_ERROR_COMMUNICATION;
		arg->ret_origin = TEEC_ORIGIN_COMMS;
		/* Close session again to avoid leakage */
		optee_close_session(ctx, msg_arg->session);
	} else {
		arg->session = msg_arg->session;
		arg->ret = msg_arg->ret;
		arg->ret_origin = msg_arg->ret_origin;
	}
out:
	tee_shm_free(shm);

	return rc;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) int optee_close_session(struct tee_context *ctx, u32 session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) struct optee_context_data *ctxdata = ctx->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) struct tee_shm *shm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) struct optee_msg_arg *msg_arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) phys_addr_t msg_parg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) struct optee_session *sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) /* Check that the session is valid and remove it from the list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) mutex_lock(&ctxdata->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) sess = find_session(ctxdata, session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) if (sess)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) list_del(&sess->list_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) mutex_unlock(&ctxdata->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) if (!sess)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) kfree(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) if (IS_ERR(shm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) return PTR_ERR(shm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) msg_arg->session = session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) optee_do_call_with_arg(ctx, msg_parg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) tee_shm_free(shm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) struct tee_param *param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) struct optee_context_data *ctxdata = ctx->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) struct tee_shm *shm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) struct optee_msg_arg *msg_arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) phys_addr_t msg_parg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) struct optee_session *sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) /* Check that the session is valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) mutex_lock(&ctxdata->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) sess = find_session(ctxdata, arg->session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) mutex_unlock(&ctxdata->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) if (!sess)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) shm = get_msg_arg(ctx, arg->num_params, &msg_arg, &msg_parg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) if (IS_ERR(shm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) return PTR_ERR(shm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) msg_arg->func = arg->func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) msg_arg->session = arg->session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) msg_arg->cancel_id = arg->cancel_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) rc = optee_to_msg_param(msg_arg->params, arg->num_params, param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) if (optee_do_call_with_arg(ctx, msg_parg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) msg_arg->ret = TEEC_ERROR_COMMUNICATION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) if (optee_from_msg_param(param, arg->num_params, msg_arg->params)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) msg_arg->ret = TEEC_ERROR_COMMUNICATION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) arg->ret = msg_arg->ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) arg->ret_origin = msg_arg->ret_origin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) tee_shm_free(shm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) struct optee_context_data *ctxdata = ctx->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) struct tee_shm *shm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) struct optee_msg_arg *msg_arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) phys_addr_t msg_parg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) struct optee_session *sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) /* Check that the session is valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) mutex_lock(&ctxdata->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) sess = find_session(ctxdata, session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) mutex_unlock(&ctxdata->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) if (!sess)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) if (IS_ERR(shm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) return PTR_ERR(shm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) msg_arg->session = session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) msg_arg->cancel_id = cancel_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) optee_do_call_with_arg(ctx, msg_parg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) tee_shm_free(shm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) * optee_enable_shm_cache() - Enables caching of some shared memory allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) * in OP-TEE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) * @optee: main service struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) void optee_enable_shm_cache(struct optee *optee)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) struct optee_call_waiter w;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) /* We need to retry until secure world isn't busy. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) optee_cq_wait_init(&optee->call_queue, &w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) struct arm_smccc_res res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) optee->invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 0, &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) if (res.a0 == OPTEE_SMC_RETURN_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) optee_cq_wait_for_completion(&optee->call_queue, &w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) optee_cq_wait_final(&optee->call_queue, &w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414)
/**
 * __optee_disable_shm_cache() - Disables caching of some shared memory
 *				 allocation in OP-TEE
 * @optee:	main service struct
 * @is_mapped:	true if the cached shared memory addresses were mapped by this
 *		kernel, are safe to dereference, and should be freed
 */
static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		union {
			struct arm_smccc_res smccc;
			struct optee_smc_disable_shm_cache_result result;
		} res;

		/* Each successful call hands back one cached shm reference */
		optee->invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
				 0, &res.smccc);
		if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
			break; /* All shm's freed */
		if (res.result.status == OPTEE_SMC_RETURN_OK) {
			struct tee_shm *shm;

			/*
			 * Shared memory references that were not mapped by
			 * this kernel must be ignored to prevent a crash.
			 */
			if (!is_mapped)
				continue;

			/* Rebuild the shm pointer from the two 32-bit halves */
			shm = reg_pair_to_ptr(res.result.shm_upper32,
					      res.result.shm_lower32);
			tee_shm_free(shm);
		} else {
			/* Secure world busy: wait for a free thread and retry */
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		}
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) * optee_disable_shm_cache() - Disables caching of mapped shared memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) * allocations in OP-TEE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) * @optee: main service struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) void optee_disable_shm_cache(struct optee *optee)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) return __optee_disable_shm_cache(optee, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) * optee_disable_unmapped_shm_cache() - Disables caching of shared memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) * allocations in OP-TEE which are not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) * currently mapped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) * @optee: main service struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) void optee_disable_unmapped_shm_cache(struct optee *optee)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) return __optee_disable_shm_cache(optee, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) #define PAGELIST_ENTRIES_PER_PAGE \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) ((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) * optee_fill_pages_list() - write list of user pages to given shared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) * buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) * @dst: page-aligned buffer where list of pages will be stored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) * @pages: array of pages that represents shared buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) * @num_pages: number of entries in @pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) * @page_offset: offset of user buffer from page start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) * @dst should be big enough to hold list of user page addresses and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) * links to the next pages of buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) size_t page_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) int n = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) phys_addr_t optee_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) * for details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) u64 next_page_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) } *pages_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) * Currently OP-TEE uses 4k page size and it does not looks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) * like this will change in the future. On other hand, there are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) * no know ARM architectures with page size < 4k.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) * Thus the next built assert looks redundant. But the following
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) * code heavily relies on this assumption, so it is better be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) * safe than sorry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) pages_data = (void *)dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) * If linux page is bigger than 4k, and user buffer offset is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) * larger than 4k/8k/12k/etc this will skip first 4k pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) * because they bear no value data for OP-TEE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) optee_page = page_to_phys(*pages) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) pages_data->pages_list[n++] = optee_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) if (n == PAGELIST_ENTRIES_PER_PAGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) pages_data->next_page_data =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) virt_to_phys(pages_data + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) pages_data++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) n = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) if (!(optee_page & ~PAGE_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) if (!--num_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) pages++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) optee_page = page_to_phys(*pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) * The final entry in each pagelist page is a pointer to the next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) * pagelist page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) static size_t get_pages_list_size(size_t num_entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) u64 *optee_allocate_pages_list(size_t num_entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) void optee_free_pages_list(void *list, size_t num_entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) free_pages_exact(list, get_pages_list_size(num_entries));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) static bool is_normal_memory(pgprot_t p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) #if defined(CONFIG_ARM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) return (((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) ((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) #elif defined(CONFIG_ARM64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) #error "Unuspported architecture"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) static int __check_mem_type(struct vm_area_struct *vma, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) while (vma && is_normal_memory(vma->vm_page_prot)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) if (vma->vm_end >= end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) vma = vma->vm_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) static int check_mem_type(unsigned long start, size_t num_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) struct mm_struct *mm = current->mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) * Allow kernel address to register with OP-TEE as kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) * pages are configured as normal memory only.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) if (virt_addr_valid(start))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) mmap_read_lock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) rc = __check_mem_type(find_vma(mm, start),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) start + num_pages * PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) mmap_read_unlock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) struct page **pages, size_t num_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) unsigned long start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) struct tee_shm *shm_arg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) struct optee_msg_arg *msg_arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) u64 *pages_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) phys_addr_t msg_parg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) if (!num_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) rc = check_mem_type(start, num_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) pages_list = optee_allocate_pages_list(num_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) if (!pages_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) if (IS_ERR(shm_arg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) rc = PTR_ERR(shm_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) optee_fill_pages_list(pages_list, pages, num_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) tee_shm_get_page_offset(shm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) OPTEE_MSG_ATTR_NONCONTIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) * In the least bits of msg_arg->params->u.tmem.buf_ptr we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) * store buffer offset from 4k page, as described in OP-TEE ABI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) if (optee_do_call_with_arg(ctx, msg_parg) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) msg_arg->ret != TEEC_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) tee_shm_free(shm_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) optee_free_pages_list(pages_list, num_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) struct tee_shm *shm_arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) struct optee_msg_arg *msg_arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) phys_addr_t msg_parg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) if (IS_ERR(shm_arg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) return PTR_ERR(shm_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) if (optee_do_call_with_arg(ctx, msg_parg) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) msg_arg->ret != TEEC_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) tee_shm_free(shm_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) struct page **pages, size_t num_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) unsigned long start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) * We don't want to register supplicant memory in OP-TEE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) * Instead information about it will be passed in RPC code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) return check_mem_type(start, num_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) }