// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Rockchip Electronics Co.Ltd
 * Author: Felix Zeng <felix.zeng@rock-chips.com>
 */

#include <drm/drm_device.h>
#include <drm/drm_vma_manager.h>
#include <drm/drm_prime.h>
#include <drm/drm_file.h>
#include <drm/drm_drv.h>

#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/dma-iommu.h>
#include <linux/pfn_t.h>
#include <linux/version.h>
#include <asm/cacheflush.h>

#if KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE
#include <linux/dma-map-ops.h>
#endif

#include "rknpu_drv.h"
#include "rknpu_ioctl.h"
#include "rknpu_gem.h"

#define RKNPU_GEM_ALLOC_FROM_PAGES 1

#if RKNPU_GEM_ALLOC_FROM_PAGES
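/*
 * Allocate shmem-backed pages for a non-contiguous buffer, build a
 * scatter-gather table for them and map it for DMA. When
 * RKNPU_MEM_KERNEL_MAPPING is requested, the pages are also vmap()ed so
 * the kernel can reach the buffer through kv_addr.
 */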
static int rknpu_gem_get_pages(struct rknpu_gem_object *rknpu_obj)
{
        struct drm_device *drm = rknpu_obj->base.dev;
        struct scatterlist *s = NULL;
        dma_addr_t dma_addr = 0;
        dma_addr_t phys = 0;
        int ret = -EINVAL, i = 0;

        rknpu_obj->pages = drm_gem_get_pages(&rknpu_obj->base);
        if (IS_ERR(rknpu_obj->pages)) {
                ret = PTR_ERR(rknpu_obj->pages);
                LOG_ERROR("failed to get pages: %d\n", ret);
                return ret;
        }

        rknpu_obj->num_pages = rknpu_obj->size >> PAGE_SHIFT;

#if KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE
        rknpu_obj->sgt = drm_prime_pages_to_sg(drm, rknpu_obj->pages,
                                               rknpu_obj->num_pages);
#else
        rknpu_obj->sgt =
                drm_prime_pages_to_sg(rknpu_obj->pages, rknpu_obj->num_pages);
#endif
        if (IS_ERR(rknpu_obj->sgt)) {
                ret = PTR_ERR(rknpu_obj->sgt);
                LOG_ERROR("failed to allocate sgt: %d\n", ret);
                goto put_pages;
        }

        ret = dma_map_sg(drm->dev, rknpu_obj->sgt->sgl, rknpu_obj->sgt->nents,
                         DMA_BIDIRECTIONAL);
        if (ret == 0) {
                ret = -EFAULT;
                LOG_DEV_ERROR(drm->dev, "%s: dma map %lu fail\n", __func__,
                              rknpu_obj->size);
                goto free_sgt;
        }

        if (rknpu_obj->flags & RKNPU_MEM_KERNEL_MAPPING) {
                rknpu_obj->cookie = vmap(rknpu_obj->pages, rknpu_obj->num_pages,
                                         VM_MAP, PAGE_KERNEL);
                if (!rknpu_obj->cookie) {
                        ret = -ENOMEM;
                        LOG_ERROR("failed to vmap: %d\n", ret);
                        goto unmap_sg;
                }
                rknpu_obj->kv_addr = rknpu_obj->cookie;
        }

        dma_addr = sg_dma_address(rknpu_obj->sgt->sgl);
        rknpu_obj->dma_addr = dma_addr;

        for_each_sg(rknpu_obj->sgt->sgl, s, rknpu_obj->sgt->nents, i) {
                dma_addr += s->length;
                phys = sg_phys(s);
                LOG_DEBUG(
                        "gem pages alloc sgt[%d], dma_address: %pad, length: %#x, phys: %pad, virt: %p\n",
                        i, &dma_addr, s->length, &phys, sg_virt(s));
        }

        return 0;

unmap_sg:
        dma_unmap_sg(drm->dev, rknpu_obj->sgt->sgl, rknpu_obj->sgt->nents,
                     DMA_BIDIRECTIONAL);

free_sgt:
        sg_free_table(rknpu_obj->sgt);
        kfree(rknpu_obj->sgt);

put_pages:
        drm_gem_put_pages(&rknpu_obj->base, rknpu_obj->pages, false, false);

        return ret;
}

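/*
 * Undo rknpu_gem_get_pages(): drop the kernel mapping if one was created,
 * unmap the scatterlist from the device, release the shmem pages and free
 * the sg_table.
 */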
static void rknpu_gem_put_pages(struct rknpu_gem_object *rknpu_obj)
{
        struct drm_device *drm = rknpu_obj->base.dev;

        if (rknpu_obj->flags & RKNPU_MEM_KERNEL_MAPPING) {
                vunmap(rknpu_obj->kv_addr);
                rknpu_obj->kv_addr = NULL;
        }

        dma_unmap_sg(drm->dev, rknpu_obj->sgt->sgl, rknpu_obj->sgt->nents,
                     DMA_BIDIRECTIONAL);

        drm_gem_put_pages(&rknpu_obj->base, rknpu_obj->pages, true, true);

        if (rknpu_obj->sgt != NULL) {
                sg_free_table(rknpu_obj->sgt);
                kfree(rknpu_obj->sgt);
        }
}
#endif

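/*
 * Allocate backing storage for a GEM object. The strategy follows
 * rknpu_obj->flags: non-contiguous buffers on an IOMMU-enabled device come
 * from shmem pages (rknpu_gem_get_pages), everything else goes through
 * dma_alloc_attrs(), with a fallback from contiguous to non-contiguous when
 * the IOMMU can still map the result.
 */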
static int rknpu_gem_alloc_buf(struct rknpu_gem_object *rknpu_obj)
{
        struct drm_device *drm = rknpu_obj->base.dev;
        struct rknpu_device *rknpu_dev = drm->dev_private;
        unsigned int nr_pages = 0;
        struct sg_table *sgt = NULL;
        struct scatterlist *s = NULL;
        gfp_t gfp_mask = GFP_KERNEL;
        int ret = -EINVAL, i = 0;

        if (rknpu_obj->dma_addr) {
                LOG_DEBUG("buffer already allocated.\n");
                return 0;
        }

        rknpu_obj->dma_attrs = 0;

        /*
         * If RKNPU_MEM_CONTIGUOUS is set, a fully physically contiguous
         * memory region is allocated; otherwise the buffer is made as
         * physically contiguous as possible.
         */
        if (!(rknpu_obj->flags & RKNPU_MEM_NON_CONTIGUOUS))
                rknpu_obj->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;

        // cacheable mapping or writecombine mapping
        if (rknpu_obj->flags & RKNPU_MEM_CACHEABLE) {
#ifdef DMA_ATTR_NON_CONSISTENT
                rknpu_obj->dma_attrs |= DMA_ATTR_NON_CONSISTENT;
#endif
#ifdef DMA_ATTR_SYS_CACHE_ONLY
                rknpu_obj->dma_attrs |= DMA_ATTR_SYS_CACHE_ONLY;
#endif
        } else if (rknpu_obj->flags & RKNPU_MEM_WRITE_COMBINE) {
                rknpu_obj->dma_attrs |= DMA_ATTR_WRITE_COMBINE;
        }

        if (!(rknpu_obj->flags & RKNPU_MEM_KERNEL_MAPPING))
                rknpu_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

#ifdef DMA_ATTR_SKIP_ZEROING
        if (!(rknpu_obj->flags & RKNPU_MEM_ZEROING))
                rknpu_obj->dma_attrs |= DMA_ATTR_SKIP_ZEROING;
#endif

#if RKNPU_GEM_ALLOC_FROM_PAGES
        if ((rknpu_obj->flags & RKNPU_MEM_NON_CONTIGUOUS) &&
            rknpu_dev->iommu_en) {
                return rknpu_gem_get_pages(rknpu_obj);
        }
#endif

        if (rknpu_obj->flags & RKNPU_MEM_ZEROING)
                gfp_mask |= __GFP_ZERO;

        if (!(rknpu_obj->flags & RKNPU_MEM_NON_DMA32)) {
                gfp_mask &= ~__GFP_HIGHMEM;
                gfp_mask |= __GFP_DMA32;
        }

        nr_pages = rknpu_obj->size >> PAGE_SHIFT;

        rknpu_obj->pages = rknpu_gem_alloc_page(nr_pages);
        if (!rknpu_obj->pages) {
                LOG_ERROR("failed to allocate pages.\n");
                return -ENOMEM;
        }

        rknpu_obj->cookie =
                dma_alloc_attrs(drm->dev, rknpu_obj->size, &rknpu_obj->dma_addr,
                                gfp_mask, rknpu_obj->dma_attrs);
        if (!rknpu_obj->cookie) {
                /*
                 * When RKNPU_MEM_CONTIGUOUS was requested and an IOMMU is
                 * available, try to fall back to a non-contiguous allocation.
                 */
                if (!(rknpu_obj->flags & RKNPU_MEM_NON_CONTIGUOUS) &&
                    rknpu_dev->iommu_en) {
                        LOG_DEV_WARN(
                                drm->dev,
                                "try to fallback to allocate non-contiguous %lu buffer.\n",
                                rknpu_obj->size);
                        rknpu_obj->dma_attrs &= ~DMA_ATTR_FORCE_CONTIGUOUS;
                        rknpu_obj->flags |= RKNPU_MEM_NON_CONTIGUOUS;
                        rknpu_obj->cookie =
                                dma_alloc_attrs(drm->dev, rknpu_obj->size,
                                                &rknpu_obj->dma_addr, gfp_mask,
                                                rknpu_obj->dma_attrs);
                        if (!rknpu_obj->cookie) {
                                LOG_DEV_ERROR(
                                        drm->dev,
                                        "failed to allocate non-contiguous %lu buffer.\n",
                                        rknpu_obj->size);
                                goto err_free;
                        }
                } else {
                        LOG_DEV_ERROR(drm->dev,
                                      "failed to allocate %lu buffer.\n",
                                      rknpu_obj->size);
                        goto err_free;
                }
        }

        if (rknpu_obj->flags & RKNPU_MEM_KERNEL_MAPPING)
                rknpu_obj->kv_addr = rknpu_obj->cookie;

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                ret = -ENOMEM;
                goto err_free_dma;
        }

        ret = dma_get_sgtable_attrs(drm->dev, sgt, rknpu_obj->cookie,
                                    rknpu_obj->dma_addr, rknpu_obj->size,
                                    rknpu_obj->dma_attrs);
        if (ret < 0) {
                LOG_DEV_ERROR(drm->dev, "failed to get sgtable.\n");
                goto err_free_sgt;
        }

        for_each_sg(sgt->sgl, s, sgt->nents, i) {
                sg_dma_address(s) = sg_phys(s);
                LOG_DEBUG("dma alloc sgt[%d], phys_address: %pad, length: %u\n",
                          i, &s->dma_address, s->length);
        }

        if (drm_prime_sg_to_page_addr_arrays(sgt, rknpu_obj->pages, NULL,
                                             nr_pages)) {
                LOG_DEV_ERROR(drm->dev, "invalid sgtable.\n");
                ret = -EINVAL;
                goto err_free_sg_table;
        }

        rknpu_obj->sgt = sgt;

        return ret;

err_free_sg_table:
        sg_free_table(sgt);
err_free_sgt:
        kfree(sgt);
err_free_dma:
        dma_free_attrs(drm->dev, rknpu_obj->size, rknpu_obj->cookie,
                       rknpu_obj->dma_addr, rknpu_obj->dma_attrs);
err_free:
        rknpu_gem_free_page(rknpu_obj->pages);

        return ret;
}

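/*
 * Release the backing storage allocated by rknpu_gem_alloc_buf(), taking
 * the page-based path for non-contiguous IOMMU allocations and the
 * dma_free_attrs() path for everything else.
 */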
static void rknpu_gem_free_buf(struct rknpu_gem_object *rknpu_obj)
{
        struct drm_device *drm = rknpu_obj->base.dev;
#if RKNPU_GEM_ALLOC_FROM_PAGES
        struct rknpu_device *rknpu_dev = drm->dev_private;
#endif

        if (!rknpu_obj->dma_addr) {
                LOG_DEBUG("dma handle is invalid.\n");
                return;
        }

#if RKNPU_GEM_ALLOC_FROM_PAGES
        if ((rknpu_obj->flags & RKNPU_MEM_NON_CONTIGUOUS) &&
            rknpu_dev->iommu_en) {
                rknpu_gem_put_pages(rknpu_obj);
                return;
        }
#endif

        sg_free_table(rknpu_obj->sgt);
        kfree(rknpu_obj->sgt);

        dma_free_attrs(drm->dev, rknpu_obj->size, rknpu_obj->cookie,
                       rknpu_obj->dma_addr, rknpu_obj->dma_attrs);

        rknpu_gem_free_page(rknpu_obj->pages);

        rknpu_obj->dma_addr = 0;
}

static int rknpu_gem_handle_create(struct drm_gem_object *obj,
                                   struct drm_file *file_priv,
                                   unsigned int *handle)
{
        int ret = -EINVAL;
        /*
         * Allocate an id from the idr table under which the object is
         * registered; the returned handle holds that id and is what user
         * space sees.
         */
        ret = drm_gem_handle_create(file_priv, obj, handle);
        if (ret)
                return ret;

        LOG_DEBUG("gem handle: %#x\n", *handle);

        /* drop reference from allocate - handle holds it now. */
        rknpu_gem_object_put(obj);

        return 0;
}

static int rknpu_gem_handle_destroy(struct drm_file *file_priv,
                                    unsigned int handle)
{
        return drm_gem_handle_delete(file_priv, handle);
}

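/*
 * Allocate and initialize the rknpu GEM object itself (without backing
 * storage) and adjust the gfp mask of its shmem mapping according to the
 * zeroing and DMA32 flags, so later page allocations respect them.
 */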
static struct rknpu_gem_object *rknpu_gem_init(struct drm_device *drm,
                                               unsigned long size)
{
        struct rknpu_gem_object *rknpu_obj = NULL;
        struct drm_gem_object *obj = NULL;
        gfp_t gfp_mask;
        int ret = -EINVAL;

        rknpu_obj = kzalloc(sizeof(*rknpu_obj), GFP_KERNEL);
        if (!rknpu_obj)
                return ERR_PTR(-ENOMEM);

        obj = &rknpu_obj->base;

        ret = drm_gem_object_init(drm, obj, size);
        if (ret < 0) {
                LOG_DEV_ERROR(drm->dev, "failed to initialize gem object\n");
                kfree(rknpu_obj);
                return ERR_PTR(ret);
        }

        rknpu_obj->size = rknpu_obj->base.size;

        gfp_mask = mapping_gfp_mask(obj->filp->f_mapping);

        if (rknpu_obj->flags & RKNPU_MEM_ZEROING)
                gfp_mask |= __GFP_ZERO;

        if (!(rknpu_obj->flags & RKNPU_MEM_NON_DMA32)) {
                gfp_mask &= ~__GFP_HIGHMEM;
                gfp_mask |= __GFP_DMA32;
        }

        mapping_set_gfp_mask(obj->filp->f_mapping, gfp_mask);

        return rknpu_obj;
}

static void rknpu_gem_release(struct rknpu_gem_object *rknpu_obj)
{
        /* release file pointer to gem object. */
        drm_gem_object_release(&rknpu_obj->base);
        kfree(rknpu_obj);
}

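/*
 * Allocate a buffer that is partially backed by on-chip SRAM: a contiguous
 * IOVA range covering sram_size + size is reserved, the SRAM chunk already
 * obtained from rknpu_mm_alloc() is mapped at its start, and any remaining
 * DDR portion is backed by shmem pages mapped right after it. The IOVA base
 * becomes the object's dma_addr.
 */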
static int rknpu_gem_alloc_buf_with_sram(struct rknpu_gem_object *rknpu_obj)
{
        struct drm_device *drm = rknpu_obj->base.dev;
        struct rknpu_device *rknpu_dev = drm->dev_private;
        struct iommu_domain *domain = NULL;
        struct rknpu_iommu_dma_cookie *cookie = NULL;
        struct iova_domain *iovad = NULL;
        struct scatterlist *s = NULL;
        unsigned long length = 0;
        unsigned long size = 0;
        unsigned long offset = 0;
        int i = 0;
        int ret = -EINVAL;

        /* map iova to sram */
        domain = iommu_get_domain_for_dev(rknpu_dev->dev);
        if (!domain) {
                LOG_ERROR("failed to get iommu domain!");
                return -EINVAL;
        }

        cookie = domain->iova_cookie;
        iovad = &cookie->iovad;
        rknpu_obj->iova_size =
                iova_align(iovad, rknpu_obj->sram_size + rknpu_obj->size);
        rknpu_obj->iova_start = rknpu_iommu_dma_alloc_iova(
                domain, rknpu_obj->iova_size, dma_get_mask(drm->dev), drm->dev);
        if (!rknpu_obj->iova_start) {
                LOG_ERROR("iommu_dma_alloc_iova failed\n");
                return -ENOMEM;
        }

        LOG_INFO("allocate iova start: %pad, size: %lu\n",
                 &rknpu_obj->iova_start, rknpu_obj->iova_size);

        /*
         * Overview SRAM + DDR map to IOVA
         * --------
         * sram_size: rknpu_obj->sram_size
         *   - allocate from SRAM, this size value has been page-aligned
         * size: rknpu_obj->size
         *   - allocate from DDR pages, this size value has been page-aligned
         * iova_size: rknpu_obj->iova_size
         *   - from iova_align(sram_size + size)
         *   - it may be larger than (sram_size + size), and the larger part
         *     is not mapped
         * --------
         *
         * |<- sram_size ->| |<- - - - size - - - ->|
         * +---------------+ +----------------------+
         * |     SRAM      | |         DDR          |
         * +---------------+ +----------------------+
         *         |                     |
         *         V                     V
         * +------------------------------------------+
         * |                IOVA range                |
         * +------------------------------------------+
         * |<- - - - - - - - iova_size - - - - - - ->|
         *
         */
        offset = rknpu_obj->sram_obj->range_start *
                 rknpu_dev->sram_mm->chunk_size;
        ret = iommu_map(domain, rknpu_obj->iova_start,
                        rknpu_dev->sram_start + offset, rknpu_obj->sram_size,
                        IOMMU_READ | IOMMU_WRITE);
        if (ret) {
                LOG_ERROR("sram iommu_map error: %d\n", ret);
                goto free_iova;
        }

        rknpu_obj->dma_addr = rknpu_obj->iova_start;

        if (rknpu_obj->size == 0) {
                LOG_INFO("allocate sram size: %lu\n", rknpu_obj->sram_size);
                return 0;
        }

        rknpu_obj->pages = drm_gem_get_pages(&rknpu_obj->base);
        if (IS_ERR(rknpu_obj->pages)) {
                ret = PTR_ERR(rknpu_obj->pages);
                LOG_ERROR("failed to get pages: %d\n", ret);
                goto sram_unmap;
        }

        rknpu_obj->num_pages = rknpu_obj->size >> PAGE_SHIFT;

#if KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE
        rknpu_obj->sgt = drm_prime_pages_to_sg(drm, rknpu_obj->pages,
                                               rknpu_obj->num_pages);
#else
        rknpu_obj->sgt =
                drm_prime_pages_to_sg(rknpu_obj->pages, rknpu_obj->num_pages);
#endif
        if (IS_ERR(rknpu_obj->sgt)) {
                ret = PTR_ERR(rknpu_obj->sgt);
                LOG_ERROR("failed to allocate sgt: %d\n", ret);
                goto put_pages;
        }

        length = rknpu_obj->size;
        offset = rknpu_obj->iova_start + rknpu_obj->sram_size;

        for_each_sg(rknpu_obj->sgt->sgl, s, rknpu_obj->sgt->nents, i) {
                size = (length < s->length) ? length : s->length;

                ret = iommu_map(domain, offset, sg_phys(s), size,
                                IOMMU_READ | IOMMU_WRITE);
                if (ret) {
                        LOG_ERROR("ddr iommu_map error: %d\n", ret);
                        goto sgl_unmap;
                }

                length -= size;
                offset += size;

                if (length == 0)
                        break;
        }

        LOG_INFO("allocate size: %lu with sram size: %lu\n", rknpu_obj->size,
                 rknpu_obj->sram_size);

        return 0;

sgl_unmap:
        iommu_unmap(domain, rknpu_obj->iova_start + rknpu_obj->sram_size,
                    rknpu_obj->size - length);
        sg_free_table(rknpu_obj->sgt);
        kfree(rknpu_obj->sgt);

put_pages:
        drm_gem_put_pages(&rknpu_obj->base, rknpu_obj->pages, false, false);

sram_unmap:
        iommu_unmap(domain, rknpu_obj->iova_start, rknpu_obj->sram_size);

free_iova:
        rknpu_iommu_dma_free_iova(domain->iova_cookie, rknpu_obj->iova_start,
                                  rknpu_obj->iova_size);

        return ret;
}

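/*
 * Tear down a SRAM-backed buffer: unmap the SRAM and DDR portions from the
 * IOMMU domain, return the IOVA range, and release the shmem pages and
 * sg_table if a DDR portion was allocated.
 */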
static void rknpu_gem_free_buf_with_sram(struct rknpu_gem_object *rknpu_obj)
{
        struct drm_device *drm = rknpu_obj->base.dev;
        struct rknpu_device *rknpu_dev = drm->dev_private;
        struct iommu_domain *domain = NULL;

        domain = iommu_get_domain_for_dev(rknpu_dev->dev);
        if (domain) {
                iommu_unmap(domain, rknpu_obj->iova_start,
                            rknpu_obj->sram_size);
                if (rknpu_obj->size > 0)
                        iommu_unmap(domain,
                                    rknpu_obj->iova_start +
                                            rknpu_obj->sram_size,
                                    rknpu_obj->size);
                rknpu_iommu_dma_free_iova(domain->iova_cookie,
                                          rknpu_obj->iova_start,
                                          rknpu_obj->iova_size);
        }

        if (rknpu_obj->pages)
                drm_gem_put_pages(&rknpu_obj->base, rknpu_obj->pages, true,
                                  true);

        if (rknpu_obj->sgt != NULL) {
                sg_free_table(rknpu_obj->sgt);
                kfree(rknpu_obj->sgt);
        }
}

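/*
 * Create a GEM object of @size bytes. When SRAM support is enabled and the
 * caller passed RKNPU_MEM_TRY_ALLOC_SRAM, as much of the buffer as possible
 * is placed in SRAM (bounded by @sram_size and by the free SRAM chunks) and
 * the remainder, if any, is allocated from DDR. Returns the new object or
 * an ERR_PTR.
 */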
struct rknpu_gem_object *rknpu_gem_object_create(struct drm_device *drm,
                                                 unsigned int flags,
                                                 unsigned long size,
                                                 unsigned long sram_size)
{
        struct rknpu_device *rknpu_dev = drm->dev_private;
        struct rknpu_gem_object *rknpu_obj = NULL;
        size_t remain_ddr_size = 0;
        int ret = -EINVAL;

        if (!size) {
                LOG_DEV_ERROR(drm->dev, "invalid buffer size: %lu\n", size);
                return ERR_PTR(-EINVAL);
        }

        remain_ddr_size = round_up(size, PAGE_SIZE);

        if (!rknpu_dev->iommu_en && (flags & RKNPU_MEM_NON_CONTIGUOUS)) {
                /*
                 * When no IOMMU is available, all allocated buffers are
                 * physically contiguous anyway, so drop the
                 * RKNPU_MEM_NON_CONTIGUOUS flag.
                 */
                flags &= ~RKNPU_MEM_NON_CONTIGUOUS;
                LOG_WARN(
                        "non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
        }

        if (IS_ENABLED(CONFIG_ROCKCHIP_RKNPU_SRAM) &&
            (flags & RKNPU_MEM_TRY_ALLOC_SRAM) && rknpu_dev->sram_size > 0) {
                size_t sram_free_size = 0;
                size_t real_sram_size = 0;

                if (sram_size != 0)
                        sram_size = round_up(sram_size, PAGE_SIZE);

                rknpu_obj = rknpu_gem_init(drm, remain_ddr_size);
                if (IS_ERR(rknpu_obj))
                        return rknpu_obj;

                /* set memory type and cache attribute from user side. */
                rknpu_obj->flags = flags;

                sram_free_size = rknpu_dev->sram_mm->free_chunks *
                                 rknpu_dev->sram_mm->chunk_size;
                if (sram_free_size > 0) {
                        real_sram_size = remain_ddr_size;
                        if (sram_size != 0 && remain_ddr_size > sram_size)
                                real_sram_size = sram_size;
                        if (real_sram_size > sram_free_size)
                                real_sram_size = sram_free_size;
                        ret = rknpu_mm_alloc(rknpu_dev->sram_mm, real_sram_size,
                                             &rknpu_obj->sram_obj);
                        if (ret != 0) {
                                sram_free_size =
                                        rknpu_dev->sram_mm->free_chunks *
                                        rknpu_dev->sram_mm->chunk_size;
                                LOG_WARN(
                                        "mm allocate %zu failed, ret: %d, free size: %zu\n",
                                        real_sram_size, ret, sram_free_size);
                                real_sram_size = 0;
                        }
                }

                if (real_sram_size > 0) {
                        rknpu_obj->sram_size = real_sram_size;

                        ret = rknpu_gem_alloc_buf_with_sram(rknpu_obj);
                        if (ret < 0)
                                goto mm_free;
                        remain_ddr_size = 0;
                }
        }

        if (remain_ddr_size > 0) {
                rknpu_obj = rknpu_gem_init(drm, remain_ddr_size);
                if (IS_ERR(rknpu_obj))
                        return rknpu_obj;

                /* set memory type and cache attribute from user side. */
                rknpu_obj->flags = flags;

                ret = rknpu_gem_alloc_buf(rknpu_obj);
                if (ret < 0)
                        goto gem_release;
        }

        if (rknpu_obj)
                LOG_DEBUG(
                        "created dma addr: %pad, cookie: %p, ddr size: %lu, sram size: %lu, attrs: %#lx, flags: %#x\n",
                        &rknpu_obj->dma_addr, rknpu_obj->cookie, rknpu_obj->size,
                        rknpu_obj->sram_size, rknpu_obj->dma_attrs,
                        rknpu_obj->flags);

        return rknpu_obj;

mm_free:
        if (IS_ENABLED(CONFIG_ROCKCHIP_RKNPU_SRAM) &&
            rknpu_obj->sram_obj != NULL)
                rknpu_mm_free(rknpu_dev->sram_mm, rknpu_obj->sram_obj);

gem_release:
        rknpu_gem_release(rknpu_obj);

        return ERR_PTR(ret);
}

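/*
 * Final teardown of a GEM object: imported dma-bufs only drop their prime
 * bookkeeping (the exporter owns the memory), while locally allocated
 * objects free their SRAM chunk and/or DMA backing before the base object
 * is released.
 */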
void rknpu_gem_object_destroy(struct rknpu_gem_object *rknpu_obj)
{
        struct drm_gem_object *obj = &rknpu_obj->base;

        LOG_DEBUG(
                "destroy dma addr: %pad, cookie: %p, size: %lu, attrs: %#lx, flags: %#x, handle count: %d\n",
                &rknpu_obj->dma_addr, rknpu_obj->cookie, rknpu_obj->size,
                rknpu_obj->dma_attrs, rknpu_obj->flags, obj->handle_count);

        /*
         * Do not release the memory region of an imported buffer here;
         * the exporter releases it once the dma-buf's refcount drops to
         * zero.
         */
        if (obj->import_attach) {
                drm_prime_gem_destroy(obj, rknpu_obj->sgt);
                rknpu_gem_free_page(rknpu_obj->pages);
        } else {
                if (IS_ENABLED(CONFIG_ROCKCHIP_RKNPU_SRAM) &&
                    rknpu_obj->sram_size > 0) {
                        struct rknpu_device *rknpu_dev = obj->dev->dev_private;

                        if (rknpu_obj->sram_obj != NULL)
                                rknpu_mm_free(rknpu_dev->sram_mm,
                                              rknpu_obj->sram_obj);
                        rknpu_gem_free_buf_with_sram(rknpu_obj);
                } else {
                        rknpu_gem_free_buf(rknpu_obj);
                }
        }

        rknpu_gem_release(rknpu_obj);
}

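/*
 * ioctl handler: create a new GEM object (or reuse the object behind an
 * existing handle) and report its size, SRAM size, kernel object address
 * and DMA address back to user space.
 */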
int rknpu_gem_create_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct rknpu_mem_create *args = data;
        struct rknpu_gem_object *rknpu_obj = NULL;
        int ret = -EINVAL;

        rknpu_obj = rknpu_gem_object_find(file_priv, args->handle);
        if (!rknpu_obj) {
                rknpu_obj = rknpu_gem_object_create(
                        dev, args->flags, args->size, args->sram_size);
                if (IS_ERR(rknpu_obj))
                        return PTR_ERR(rknpu_obj);

                ret = rknpu_gem_handle_create(&rknpu_obj->base, file_priv,
                                              &args->handle);
                if (ret) {
                        rknpu_gem_object_destroy(rknpu_obj);
                        return ret;
                }
        }

        // rknpu_gem_object_get(&rknpu_obj->base);

        args->size = rknpu_obj->size;
        args->sram_size = rknpu_obj->sram_size;
        args->obj_addr = (__u64)(uintptr_t)rknpu_obj;
        args->dma_addr = rknpu_obj->dma_addr;

        return 0;
}

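/*
 * ioctl handler: translate a GEM handle into the fake mmap offset that user
 * space passes to mmap() on the DRM device node.
 */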
int rknpu_gem_map_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
        struct rknpu_mem_map *args = data;

#if KERNEL_VERSION(4, 19, 0) > LINUX_VERSION_CODE
        return rknpu_gem_dumb_map_offset(file_priv, dev, args->handle,
                                         &args->offset);
#else
        return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
                                       &args->offset);
#endif
}

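/*
 * ioctl handler: drop the user-space handle of a GEM object; the object
 * itself is destroyed once its last reference goes away.
 */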
int rknpu_gem_destroy_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        struct rknpu_gem_object *rknpu_obj = NULL;
        struct rknpu_mem_destroy *args = data;

        rknpu_obj = rknpu_gem_object_find(file_priv, args->handle);
        if (!rknpu_obj)
                return -EINVAL;

        // rknpu_gem_object_put(&rknpu_obj->base);

        return rknpu_gem_handle_destroy(file_priv, args->handle);
}

#if RKNPU_GEM_ALLOC_FROM_PAGES
/*
 * __vm_map_pages - maps range of kernel pages into user vma
 * @vma: user vma to map to
 * @pages: pointer to array of source kernel pages
 * @num: number of pages in page array
 * @offset: user's requested vm_pgoff
 *
 * This allows drivers to map range of kernel pages into a user vma.
 *
 * Return: 0 on success and error code otherwise.
 */
static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
                          unsigned long num, unsigned long offset)
{
        unsigned long count = vma_pages(vma);
        unsigned long uaddr = vma->vm_start;
        int ret = -EINVAL, i = 0;

        /* Fail if the user requested offset is beyond the end of the object */
        if (offset >= num)
                return -ENXIO;

        /* Fail if the user requested size exceeds available object size */
        if (count > num - offset)
                return -ENXIO;

        for (i = 0; i < count; i++) {
                ret = vm_insert_page(vma, uaddr, pages[offset + i]);
                if (ret < 0)
                        return ret;
                uaddr += PAGE_SIZE;
        }

        return 0;
}

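/*
 * Map a page-backed (non-contiguous, IOMMU) buffer into user space by
 * inserting the individual pages into the vma.
 */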
static int rknpu_gem_mmap_pages(struct rknpu_gem_object *rknpu_obj,
                                struct vm_area_struct *vma)
{
        struct drm_device *drm = rknpu_obj->base.dev;
        int ret = -EINVAL;

        vma->vm_flags |= VM_MIXEDMAP;

        ret = __vm_map_pages(vma, rknpu_obj->pages, rknpu_obj->num_pages,
                             vma->vm_pgoff);
        if (ret < 0)
                LOG_DEV_ERROR(drm->dev, "failed to map pages into vma: %d\n",
                              ret);

        return ret;
}
#endif

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) static int rknpu_gem_mmap_buffer(struct rknpu_gem_object *rknpu_obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) struct drm_device *drm = rknpu_obj->base.dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) #if RKNPU_GEM_ALLOC_FROM_PAGES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) struct rknpu_device *rknpu_dev = drm->dev_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) unsigned long vm_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) * clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) * the whole buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) vma->vm_flags &= ~VM_PFNMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) vma->vm_pgoff = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) vm_size = vma->vm_end - vma->vm_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) /* fail if the user-requested size exceeds the object size. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) if (vm_size > rknpu_obj->size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) if (rknpu_obj->sram_size > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) unsigned long offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) unsigned long num_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
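		/*
		 * Buffers with an SRAM part are mapped in two pieces: the
		 * first sram_size bytes of the VMA come from on-chip SRAM via
		 * remap_pfn_range(), and the remainder of the VMA is backed by
		 * the object's page array (rknpu_obj->pages), inserted one
		 * page at a time below.
		 */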
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) vma->vm_flags |= VM_MIXEDMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) offset = rknpu_obj->sram_obj->range_start *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) rknpu_dev->sram_mm->chunk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) vma->vm_pgoff = __phys_to_pfn(rknpu_dev->sram_start + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) rknpu_obj->sram_size, vma->vm_page_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (rknpu_obj->size == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) offset = rknpu_obj->sram_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) num_pages = (vm_size - rknpu_obj->sram_size) / PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) for (i = 0; i < num_pages; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) ret = vm_insert_page(vma, vma->vm_start + offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) rknpu_obj->pages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) offset += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) #if RKNPU_GEM_ALLOC_FROM_PAGES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if ((rknpu_obj->flags & RKNPU_MEM_NON_CONTIGUOUS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) rknpu_dev->iommu_en) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) return rknpu_gem_mmap_pages(rknpu_obj, vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) ret = dma_mmap_attrs(drm->dev, vma, rknpu_obj->cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) rknpu_obj->dma_addr, rknpu_obj->size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) rknpu_obj->dma_attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) LOG_DEV_ERROR(drm->dev, "failed to mmap, ret: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) void rknpu_gem_free_object(struct drm_gem_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) rknpu_gem_object_destroy(to_rknpu_obj(obj));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) int rknpu_gem_dumb_create(struct drm_file *file_priv, struct drm_device *drm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) struct drm_mode_create_dumb *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) struct rknpu_device *rknpu_dev = drm->dev_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) struct rknpu_gem_object *rknpu_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) unsigned int flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) * Allocate memory to be used for the framebuffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) * This callback is invoked by a user application through the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * DRM_IOCTL_MODE_CREATE_DUMB ioctl.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) args->pitch = args->width * ((args->bpp + 7) / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) args->size = args->pitch * args->height;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) if (rknpu_dev->iommu_en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) flags = RKNPU_MEM_NON_CONTIGUOUS | RKNPU_MEM_WRITE_COMBINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) flags = RKNPU_MEM_CONTIGUOUS | RKNPU_MEM_WRITE_COMBINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) rknpu_obj = rknpu_gem_object_create(drm, flags, args->size, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) if (IS_ERR(rknpu_obj)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) LOG_DEV_ERROR(drm->dev, "gem object allocate failed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) return PTR_ERR(rknpu_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) ret = rknpu_gem_handle_create(&rknpu_obj->base, file_priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) &args->handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) rknpu_gem_object_destroy(rknpu_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
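
/*
 * Illustrative only: a minimal userspace sketch of allocating and mapping a
 * dumb buffer through the generic DRM ioctls that reach
 * rknpu_gem_dumb_create() above (and, on older kernels,
 * rknpu_gem_dumb_map_offset() below). The fd, the width/height/bpp values
 * and the lack of error handling are assumptions for illustration, not part
 * of this driver:
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 640, .height = 480, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *	void *ptr;
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 */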
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) #if KERNEL_VERSION(4, 19, 0) > LINUX_VERSION_CODE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) int rknpu_gem_dumb_map_offset(struct drm_file *file_priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) struct drm_device *drm, uint32_t handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) uint64_t *offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) struct rknpu_gem_object *rknpu_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) struct drm_gem_object *obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) rknpu_obj = rknpu_gem_object_find(file_priv, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) if (!rknpu_obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) /* Don't allow imported objects to be mapped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) obj = &rknpu_obj->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (obj->import_attach)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) ret = drm_gem_create_mmap_offset(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) *offset = drm_vma_node_offset_addr(&obj->vma_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
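/*
 * Page-fault handler for GEM objects that are backed by a page array.
 * The three variants below differ only in the signature and helpers that
 * changed across kernel versions: >= 4.15 returns vm_fault_t and uses
 * vmf_insert_mixed(), 4.14 returns int and uses vm_insert_mixed(), and
 * older kernels still receive the vma as an explicit argument.
 */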
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) #if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) vm_fault_t rknpu_gem_fault(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) struct vm_area_struct *vma = vmf->vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) struct drm_gem_object *obj = vma->vm_private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) struct rknpu_gem_object *rknpu_obj = to_rknpu_obj(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) struct drm_device *drm = rknpu_obj->base.dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) unsigned long pfn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) pgoff_t page_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) if (page_offset >= (rknpu_obj->size >> PAGE_SHIFT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) LOG_DEV_ERROR(drm->dev, "invalid page offset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) return VM_FAULT_SIGBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) pfn = page_to_pfn(rknpu_obj->pages[page_offset]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) return vmf_insert_mixed(vma, vmf->address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) __pfn_to_pfn_t(pfn, PFN_DEV));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) #elif KERNEL_VERSION(4, 14, 0) <= LINUX_VERSION_CODE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) int rknpu_gem_fault(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) struct vm_area_struct *vma = vmf->vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) struct drm_gem_object *obj = vma->vm_private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) struct rknpu_gem_object *rknpu_obj = to_rknpu_obj(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) struct drm_device *drm = rknpu_obj->base.dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) unsigned long pfn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) pgoff_t page_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) if (page_offset >= (rknpu_obj->size >> PAGE_SHIFT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) LOG_DEV_ERROR(drm->dev, "invalid page offset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) pfn = page_to_pfn(rknpu_obj->pages[page_offset]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) switch (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) case -ERESTARTSYS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) case -EINTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) return VM_FAULT_NOPAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) case -ENOMEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) return VM_FAULT_OOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) return VM_FAULT_SIGBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) int rknpu_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) struct drm_gem_object *obj = vma->vm_private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) struct rknpu_gem_object *rknpu_obj = to_rknpu_obj(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) struct drm_device *drm = rknpu_obj->base.dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) unsigned long pfn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) pgoff_t page_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) if (page_offset >= (rknpu_obj->size >> PAGE_SHIFT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) LOG_DEV_ERROR(drm->dev, "invalid page offset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) pfn = page_to_pfn(rknpu_obj->pages[page_offset]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) __pfn_to_pfn_t(pfn, PFN_DEV));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) switch (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) case -ERESTARTSYS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) case -EINTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) return VM_FAULT_NOPAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) case -ENOMEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) return VM_FAULT_OOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) return VM_FAULT_SIGBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) #endif
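
/*
 * A minimal sketch of how the fault handler above is typically wired up;
 * the actual vm_operations_struct used by this driver lives elsewhere in
 * the source tree, so the name below is an assumption for illustration:
 *
 *	static const struct vm_operations_struct rknpu_gem_vm_ops = {
 *		.fault = rknpu_gem_fault,
 *		.open = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 */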
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) static int rknpu_gem_mmap_obj(struct drm_gem_object *obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) struct rknpu_gem_object *rknpu_obj = to_rknpu_obj(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) LOG_DEBUG("flags: %#x\n", rknpu_obj->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) /* non-cacheable by default. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) if (rknpu_obj->flags & RKNPU_MEM_CACHEABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) } else if (rknpu_obj->flags & RKNPU_MEM_WRITE_COMBINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) vma->vm_page_prot =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) vma->vm_page_prot =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) pgprot_noncached(vm_get_page_prot(vma->vm_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) ret = rknpu_gem_mmap_buffer(rknpu_obj, vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) goto err_close_vm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) err_close_vm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) drm_gem_vm_close(vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) int rknpu_gem_mmap(struct file *filp, struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) struct drm_gem_object *obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) /* set up the vm_area_struct via the common DRM mmap helper. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) ret = drm_gem_mmap(filp, vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) LOG_ERROR("failed to mmap, ret: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) obj = vma->vm_private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) if (obj->import_attach)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) return dma_buf_mmap(obj->dma_buf, vma, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) return rknpu_gem_mmap_obj(obj, vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) /* low-level interface prime helpers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) #if KERNEL_VERSION(4, 13, 0) <= LINUX_VERSION_CODE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) struct drm_gem_object *rknpu_gem_prime_import(struct drm_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) struct dma_buf *dma_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) struct sg_table *rknpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) struct rknpu_gem_object *rknpu_obj = to_rknpu_obj(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) int npages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) npages = rknpu_obj->size >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) #if KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) return drm_prime_pages_to_sg(obj->dev, rknpu_obj->pages, npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) return drm_prime_pages_to_sg(rknpu_obj->pages, npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) struct drm_gem_object *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) rknpu_gem_prime_import_sg_table(struct drm_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) struct dma_buf_attachment *attach,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) struct sg_table *sgt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) struct rknpu_gem_object *rknpu_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) int npages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) rknpu_obj = rknpu_gem_init(dev, attach->dmabuf->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) if (IS_ERR(rknpu_obj)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) ret = PTR_ERR(rknpu_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) rknpu_obj->dma_addr = sg_dma_address(sgt->sgl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) npages = rknpu_obj->size >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) rknpu_obj->pages = rknpu_gem_alloc_page(npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (!rknpu_obj->pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) ret = drm_prime_sg_to_page_addr_arrays(sgt, rknpu_obj->pages, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) goto err_free_large;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) rknpu_obj->sgt = sgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) if (sgt->nents == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) /* always physically contiguous memory if sgt->nents is 1. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) rknpu_obj->flags |= RKNPU_MEM_CONTIGUOUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) * This case could be either contiguous or non-contiguous,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) * but treat it as non-contiguous for now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) * TODO: find a way for the exporter to notify the importer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) * of its buffer type.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) rknpu_obj->flags |= RKNPU_MEM_NON_CONTIGUOUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) return &rknpu_obj->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) err_free_large:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) rknpu_gem_free_page(rknpu_obj->pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) rknpu_gem_release(rknpu_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) void *rknpu_gem_prime_vmap(struct drm_gem_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) struct rknpu_gem_object *rknpu_obj = to_rknpu_obj(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) if (!rknpu_obj->pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) return vmap(rknpu_obj->pages, rknpu_obj->num_pages, VM_MAP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) PAGE_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) void rknpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) vunmap(vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) int rknpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) ret = drm_gem_mmap_obj(obj, obj->size, vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) return rknpu_gem_mmap_obj(obj, vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
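
/*
 * A minimal sketch of how the prime helpers above would typically be
 * referenced from struct drm_driver on the older kernels this file targets
 * (pre-5.11 field layout; the actual hookup lives in the driver registration
 * code, and the exact set of fields used there is an assumption here):
 *
 *	.gem_prime_import = rknpu_gem_prime_import,
 *	.gem_prime_get_sg_table = rknpu_gem_prime_get_sg_table,
 *	.gem_prime_import_sg_table = rknpu_gem_prime_import_sg_table,
 *	.gem_prime_vmap = rknpu_gem_prime_vmap,
 *	.gem_prime_vunmap = rknpu_gem_prime_vunmap,
 *	.gem_prime_mmap = rknpu_gem_prime_mmap,
 */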
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) int rknpu_gem_sync_ioctl(struct drm_device *dev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) struct drm_file *file_priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) struct rknpu_gem_object *rknpu_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) struct rknpu_mem_sync *args = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) unsigned long length, offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) unsigned long sg_left, size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) unsigned long len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) rknpu_obj = (struct rknpu_gem_object *)(uintptr_t)args->obj_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) if (!rknpu_obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) if (!(rknpu_obj->flags & RKNPU_MEM_CACHEABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) if (!(rknpu_obj->flags & RKNPU_MEM_NON_CONTIGUOUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) if (args->flags & RKNPU_MEM_SYNC_TO_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) dma_sync_single_range_for_device(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) dev->dev, rknpu_obj->dma_addr, args->offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) args->size, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (args->flags & RKNPU_MEM_SYNC_FROM_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) dma_sync_single_range_for_cpu(dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) rknpu_obj->dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) args->offset, args->size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) length = args->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) offset = args->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) if (IS_ENABLED(CONFIG_ROCKCHIP_RKNPU_SRAM) && rknpu_obj->sram_size > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) struct drm_gem_object *obj = &rknpu_obj->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) struct rknpu_device *rknpu_dev = obj->dev->dev_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) unsigned long sram_offset =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) rknpu_obj->sram_obj->range_start *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) rknpu_dev->sram_mm->chunk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) if ((offset + length) <= rknpu_obj->sram_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) __dma_map_area(rknpu_dev->sram_base_io +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) offset + sram_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) length, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) __dma_unmap_area(rknpu_dev->sram_base_io +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) offset + sram_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) length, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) } else if (offset >= rknpu_obj->sram_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) offset -= rknpu_obj->sram_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) unsigned long sram_length =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) rknpu_obj->sram_size - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) __dma_map_area(rknpu_dev->sram_base_io +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) offset + sram_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) sram_length, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) __dma_unmap_area(rknpu_dev->sram_base_io +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) offset + sram_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) sram_length, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) length -= sram_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
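		/*
		 * Walk the scatterlist and sync each entry that overlaps the
		 * remaining [offset, offset + length) window; len tracks how
		 * far into the buffer the walk has advanced.
		 */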
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) for_each_sg(rknpu_obj->sgt->sgl, sg, rknpu_obj->sgt->nents,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) if (length == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) len += sg->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) if (len <= offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) sg_left = len - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) size = (length < sg_left) ? length : sg_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) if (args->flags & RKNPU_MEM_SYNC_TO_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) dma_sync_sg_for_device(dev->dev, sg, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) if (args->flags & RKNPU_MEM_SYNC_FROM_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) dma_sync_sg_for_cpu(dev->dev, sg, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) offset += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) length -= size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) }
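
/*
 * Illustrative only: a hedged userspace sketch of asking the driver to sync
 * a cacheable buffer before device access. The ioctl request macro and the
 * way obj_addr is obtained come from rknpu_ioctl.h and the memory-create
 * path, so treat the names below as assumptions:
 *
 *	struct rknpu_mem_sync sync = {
 *		.obj_addr = mem.obj_addr,	// from the memory-create ioctl
 *		.offset = 0,
 *		.size = mem.size,
 *		.flags = RKNPU_MEM_SYNC_TO_DEVICE,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_RKNPU_MEM_SYNC, &sync);
 */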