^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (c) 2015, Linaro Limited
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/dma-buf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/genalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/tee_drv.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include "tee_private.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) static int pool_op_gen_alloc(struct tee_shm_pool_mgr *poolm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) struct tee_shm *shm, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) unsigned long va;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) struct gen_pool *genpool = poolm->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) size_t s = roundup(size, 1 << genpool->min_alloc_order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) va = gen_pool_alloc(genpool, s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) if (!va)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) memset((void *)va, 0, s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) shm->kaddr = (void *)va;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) shm->paddr = gen_pool_virt_to_phys(genpool, va);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) shm->size = s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) static void pool_op_gen_free(struct tee_shm_pool_mgr *poolm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) struct tee_shm *shm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) gen_pool_free(poolm->private_data, (unsigned long)shm->kaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) shm->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) shm->kaddr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) static void pool_op_gen_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) gen_pool_destroy(poolm->private_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) kfree(poolm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)
/* Ops shared by all genpool-backed pool managers created in this file */
static const struct tee_shm_pool_mgr_ops pool_ops_generic = {
	.alloc = pool_op_gen_alloc,
	.free = pool_op_gen_free,
	.destroy_poolmgr = pool_op_gen_destroy_poolmgr,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) * memory range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) * @priv_info: Information for driver private shared memory pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) * @dmabuf_info: Information for dma-buf shared memory pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) *
 * Start and end of pools must be page aligned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) *
 * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied
 * in @dmabuf_info, others will use the range provided by @priv_info.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) struct tee_shm_pool *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) struct tee_shm_pool_mem_info *dmabuf_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) struct tee_shm_pool_mgr *priv_mgr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) struct tee_shm_pool_mgr *dmabuf_mgr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) void *rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) * Create the pool for driver private shared memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) rc = tee_shm_pool_mgr_alloc_res_mem(priv_info->vaddr, priv_info->paddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) priv_info->size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) 3 /* 8 byte aligned */);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) if (IS_ERR(rc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) priv_mgr = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) * Create the pool for dma_buf shared memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) rc = tee_shm_pool_mgr_alloc_res_mem(dmabuf_info->vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) dmabuf_info->paddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) dmabuf_info->size, PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) if (IS_ERR(rc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) goto err_free_priv_mgr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) dmabuf_mgr = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) if (IS_ERR(rc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) goto err_free_dmabuf_mgr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) err_free_dmabuf_mgr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) tee_shm_pool_mgr_destroy(dmabuf_mgr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) err_free_priv_mgr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) tee_shm_pool_mgr_destroy(priv_mgr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) phys_addr_t paddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) int min_alloc_order)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) const size_t page_mask = PAGE_SIZE - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) struct tee_shm_pool_mgr *mgr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) /* Start and end must be page aligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) if (vaddr & page_mask || paddr & page_mask || size & page_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) if (!mgr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) mgr->private_data = gen_pool_create(min_alloc_order, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) if (!mgr->private_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) gen_pool_set_algo(mgr->private_data, gen_pool_best_fit, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) rc = gen_pool_add_virt(mgr->private_data, vaddr, paddr, size, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) gen_pool_destroy(mgr->private_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) mgr->ops = &pool_ops_generic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) return mgr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) kfree(mgr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) return ERR_PTR(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) EXPORT_SYMBOL_GPL(tee_shm_pool_mgr_alloc_res_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) static bool check_mgr_ops(struct tee_shm_pool_mgr *mgr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) return mgr && mgr->ops && mgr->ops->alloc && mgr->ops->free &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) mgr->ops->destroy_poolmgr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) struct tee_shm_pool_mgr *dmabuf_mgr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) struct tee_shm_pool *pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) if (!check_mgr_ops(priv_mgr) || !check_mgr_ops(dmabuf_mgr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) pool = kzalloc(sizeof(*pool), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) if (!pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) pool->private_mgr = priv_mgr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) pool->dma_buf_mgr = dmabuf_mgr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) return pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) EXPORT_SYMBOL_GPL(tee_shm_pool_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) * tee_shm_pool_free() - Free a shared memory pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) * @pool: The shared memory pool to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) * There must be no remaining shared memory allocated from this pool when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) * this function is called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) void tee_shm_pool_free(struct tee_shm_pool *pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) if (pool->private_mgr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) tee_shm_pool_mgr_destroy(pool->private_mgr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) if (pool->dma_buf_mgr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) tee_shm_pool_mgr_destroy(pool->dma_buf_mgr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) kfree(pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) EXPORT_SYMBOL_GPL(tee_shm_pool_free);