// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put(&obj->gem);
}

/* XXX move this into lib/scatterlist.c? */
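/*
 * Allocate a new SG table with @nents entries and copy the page and length
 * of each entry from the source scatterlist @sg (offsets are set to zero),
 * so that callers can map the copy without affecting users of the original
 * table.
 */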
static int sg_alloc_table_from_sg(struct sg_table *sgt, struct scatterlist *sg,
				  unsigned int nents, gfp_t gfp_mask)
{
	struct scatterlist *dst;
	unsigned int i;
	int err;

	err = sg_alloc_table(sgt, nents, gfp_mask);
	if (err < 0)
		return err;

	dst = sgt->sgl;

	for (i = 0; i < nents; i++) {
		sg_set_page(dst, sg_page(sg), sg->length, 0);
		dst = sg_next(dst);
		sg = sg_next(sg);
	}

	return 0;
}

static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
				     dma_addr_t *phys)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct sg_table *sgt;
	int err;

	/*
	 * If we've manually mapped the buffer object through the IOMMU, make
	 * sure to return the IOVA address of our mapping.
	 *
	 * Similarly, for buffers that have been allocated by the DMA API the
	 * physical address can be used for devices that are not attached to
	 * an IOMMU. For these devices, callers must pass a valid pointer via
	 * the @phys argument.
	 *
	 * Imported buffers were also already mapped at import time, so the
	 * existing mapping can be reused.
	 */
	if (phys) {
		*phys = obj->iova;
		return NULL;
	}

	/*
	 * If we don't have a mapping for this buffer yet, return an SG table
	 * so that host1x can do the mapping for us via the DMA API.
	 */
	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	if (obj->pages) {
		/*
		 * If the buffer object was allocated from the explicit IOMMU
		 * API code paths, construct an SG table from the pages.
		 */
		err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
						0, obj->gem.size, GFP_KERNEL);
		if (err < 0)
			goto free;
	} else if (obj->sgt) {
		/*
		 * If the buffer object already has an SG table but no pages
		 * were allocated for it, it means the buffer was imported and
		 * the SG table needs to be copied to avoid overwriting any
		 * other potential users of the original SG table.
		 */
		err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl,
					     obj->sgt->orig_nents, GFP_KERNEL);
		if (err < 0)
			goto free;
	} else {
		/*
		 * If the buffer object had no pages allocated and if it was
		 * not imported, it had to be allocated with the DMA API, so
		 * the DMA API helper can be used.
		 */
		err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
				      obj->gem.size);
		if (err < 0)
			goto free;
	}

	return sgt;

free:
	kfree(sgt);
	return ERR_PTR(err);
}

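/*
 * Release an SG table previously handed out by tegra_bo_pin(). A NULL table
 * means the buffer was already mapped and nothing needs to be freed.
 */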
static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
{
	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
}

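/*
 * Return a kernel virtual mapping of the buffer: reuse the existing mapping
 * for DMA API allocations, vmap the dma-buf for imported buffers, or vmap
 * the individual pages (write-combined) for IOMMU-backed allocations.
 */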
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return obj->vaddr;
	else if (obj->gem.import_attach)
		return dma_buf_vmap(obj->gem.import_attach->dmabuf);
	else
		return vmap(obj->pages, obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
}

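/* Undo tegra_bo_mmap(), matching whichever mapping path was taken. */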
static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr);
	else
		vunmap(addr);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
};

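/*
 * Reserve a node in the drm_mm IOVA allocator and map the buffer's SG table
 * into the shared IOMMU domain at that address. The resulting IOVA and
 * mapped size are stored in the buffer object.
 */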
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	int err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
			err);
		goto unlock;
	}

	bo->iova = bo->mm->start;

	bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
		goto remove;
	}

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}

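/*
 * Tear down the IOMMU mapping and release the IOVA range reserved by
 * tegra_bo_iommu_map().
 */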
static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->iova, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}

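/*
 * Allocate and initialize the GEM object and host1x handle for a buffer of
 * @size bytes (rounded up to a page multiple), including the fake mmap
 * offset. Backing storage is allocated separately.
 */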
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

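/*
 * Free the backing storage: unmap and release the pages for IOMMU-backed
 * buffers, or free the write-combined DMA allocation otherwise.
 */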
static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
	}
}

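/*
 * Allocate individual pages for the buffer, build an SG table for them and
 * map that table for DMA on the DRM device.
 */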
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
	if (err)
		goto free_sgt;

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}

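/*
 * Allocate backing storage: page-based allocations mapped through the IOMMU
 * when a domain is available, a contiguous write-combined DMA allocation
 * otherwise.
 */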
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

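/*
 * Create a buffer object of @size bytes with backing storage and apply the
 * layout requested through the DRM_TEGRA_GEM_CREATE_* flags.
 */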
struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

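/*
 * Create a buffer object and a GEM handle for it in @file. The handle holds
 * the long-term reference; the local reference is dropped before returning.
 */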
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_put(&bo->gem);

	return bo;
}

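/*
 * Wrap a foreign dma-buf in a buffer object: attach to the dma-buf, map it
 * into an SG table and, if an IOMMU domain is available, map that table into
 * the IOVA space as well.
 */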
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

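/*
 * Final GEM object destructor: undo the IOMMU mapping, then either release
 * the dma-buf attachment for imported buffers or free the locally allocated
 * backing storage.
 */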
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

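/*
 * DRM "dumb buffer" hook: compute the pitch and size from the requested
 * dimensions and create a buffer object with a handle for it.
 */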
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

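/*
 * Page fault handler for mmap()ed buffers: only page-based (IOMMU-backed)
 * buffers fault pages in one at a time; DMA API allocations are mapped up
 * front in __tegra_gem_mmap().
 */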
static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	return vmf_insert_page(vma, vmf->address, page);
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

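/*
 * Set up the userspace mapping for a buffer object: DMA API allocations are
 * mapped immediately with dma_mmap_wc(), while page-based allocations rely
 * on tegra_bo_fault() and are only marked write-combined here.
 */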
int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

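/* File operation backing mmap() on the DRM device node. */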
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	int err;

	err = drm_gem_mmap(file, vma);
	if (err < 0)
		return err;

	gem = vma->vm_private_data;

	return __tegra_gem_mmap(gem, vma);
}

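/*
 * dma-buf exporter hook: build an SG table for the buffer and map it for the
 * importing device. Returns NULL on failure.
 */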
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
					      0, gem->size, GFP_KERNEL) < 0)
			goto free;
	} else {
		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
				    gem->size) < 0)
			goto free;
	}

	if (dma_map_sgtable(attach->dev, sgt, dir, 0))
		goto free;

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

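/*
 * CPU access hooks: page-based buffers are mapped with dma_map_sgtable(), so
 * cache maintenance is performed around CPU accesses; write-combined DMA API
 * allocations need none.
 */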
static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
					    enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);

	return 0;
}

static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
					  enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);

	return 0;
}

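/* mmap() of the exported dma-buf reuses the regular GEM mapping path. */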
static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem = buf->priv;
	int err;

	err = drm_gem_mmap_obj(gem, gem->size, vma);
	if (err < 0)
		return err;

	return __tegra_gem_mmap(gem, vma);
}

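/*
 * Kernel mapping hooks for the exported dma-buf. Only buffers allocated
 * through the DMA API have a vaddr; page-based buffers return NULL here.
 */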
static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

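/* Export a GEM object as a dma-buf using the driver's dma_buf_ops. */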
struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = gem->dev->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(gem->dev, &exp_info);
}

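/*
 * Import a dma-buf: if it was exported by this driver on the same device,
 * just take a reference on the underlying GEM object instead of creating a
 * new mapping.
 */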
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}