// SPDX-License-Identifier: MIT
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 */

#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/psp-sev.h>
#include "amdtee_private.h"

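/*
 * Allocate zeroed pages for a shared-memory object, record its kernel
 * virtual address, PSP-visible physical address and rounded-up size in
 * @shm, then map the buffer into the TEE.
 */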
static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm,
			 size_t size)
{
	unsigned int order = get_order(size);
	unsigned long va;
	int rc;

	va = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!va)
		return -ENOMEM;

	shm->kaddr = (void *)va;
	shm->paddr = __psp_pa((void *)va);
	shm->size = PAGE_SIZE << order;

	/* Map the allocated memory into TEE */
	rc = amdtee_map_shmem(shm);
	if (rc) {
		free_pages(va, order);
		shm->kaddr = NULL;
		return rc;
	}

	return 0;
}

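/* Unmap a shared-memory object from the TEE and release its pages. */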
static void pool_op_free(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm)
{
	/* Unmap the shared memory from TEE */
	amdtee_unmap_shmem(shm);
	free_pages((unsigned long)shm->kaddr, get_order(shm->size));
	shm->kaddr = NULL;
}

static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
{
	kfree(poolm);
}

static const struct tee_shm_pool_mgr_ops pool_ops = {
	.alloc = pool_op_alloc,
	.free = pool_op_free,
	.destroy_poolmgr = pool_op_destroy_poolmgr,
};

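/* Create a pool manager backed by the page-based pool_ops above. */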
static struct tee_shm_pool_mgr *pool_mem_mgr_alloc(void)
{
	struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);

	if (!mgr)
		return ERR_PTR(-ENOMEM);

	mgr->ops = &pool_ops;

	return mgr;
}

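/**
 * amdtee_config_shm() - set up the driver's shared-memory pool
 *
 * Creates one pool manager for private (kernel) allocations and another
 * for dma-buf backed allocations, then combines the two into a single
 * tee_shm_pool.
 *
 * Return: pointer to the new pool on success, ERR_PTR() on failure.
 */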
struct tee_shm_pool *amdtee_config_shm(void)
{
	struct tee_shm_pool_mgr *priv_mgr;
	struct tee_shm_pool_mgr *dmabuf_mgr;
	void *rc;

	rc = pool_mem_mgr_alloc();
	if (IS_ERR(rc))
		return rc;
	priv_mgr = rc;

	rc = pool_mem_mgr_alloc();
	if (IS_ERR(rc)) {
		tee_shm_pool_mgr_destroy(priv_mgr);
		return rc;
	}
	dmabuf_mgr = rc;

	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
	if (IS_ERR(rc)) {
		tee_shm_pool_mgr_destroy(priv_mgr);
		tee_shm_pool_mgr_destroy(dmabuf_mgr);
	}

	return rc;
}
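
/*
 * Illustrative sketch only (not part of this file): a caller in the
 * driver's probe path might consume the pool roughly as below. The
 * names amdtee_desc and amdtee are assumptions standing in for the
 * driver's tee_desc and private data defined elsewhere.
 *
 *	struct tee_shm_pool *pool;
 *	struct tee_device *teedev;
 *
 *	pool = amdtee_config_shm();
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *
 *	teedev = tee_device_alloc(&amdtee_desc, NULL, pool, amdtee);
 *	if (IS_ERR(teedev)) {
 *		tee_shm_pool_free(pool);
 *		return PTR_ERR(teedev);
 *	}
 */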