// SPDX-License-Identifier: GPL-2.0

/*
 * Xen dma-buf functionality for gntdev.
 *
 * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c.
 *
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-buf.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/module.h>

#include <xen/xen.h>
#include <xen/grant_table.h>

#include "gntdev-common.h"
#include "gntdev-dmabuf.h"

#ifndef GRANT_INVALID_REF
/*
 * Note on usage of grant reference 0 as invalid grant reference:
 * grant reference 0 is valid, but never exposed to a driver,
 * because it is already in use/reserved by the PV console.
 */
#define GRANT_INVALID_REF	0
#endif

struct gntdev_dmabuf {
	struct gntdev_dmabuf_priv *priv;
	struct dma_buf *dmabuf;
	struct list_head next;
	int fd;

	union {
		struct {
			/* Exported buffers are reference counted. */
			struct kref refcount;

			struct gntdev_priv *priv;
			struct gntdev_grant_map *map;
		} exp;
		struct {
			/* Granted references of the imported buffer. */
			grant_ref_t *refs;
			/* Scatter-gather table of the imported buffer. */
			struct sg_table *sgt;
			/* dma-buf attachment of the imported buffer. */
			struct dma_buf_attachment *attach;
		} imp;
	} u;

	/* Number of pages this buffer has. */
	int nr_pages;
	/* Pages of this buffer. */
	struct page **pages;
};

struct gntdev_dmabuf_wait_obj {
	struct list_head next;
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct completion completion;
};

struct gntdev_dmabuf_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

struct gntdev_dmabuf_priv {
	/* List of exported DMA buffers. */
	struct list_head exp_list;
	/* List of wait objects. */
	struct list_head exp_wait_list;
	/* List of imported DMA buffers. */
	struct list_head imp_list;
	/* Protects the exp_list, exp_wait_list and imp_list above. */
	struct mutex lock;
	/*
	 * We reference this file while exporting dma-bufs, so
	 * the grant device context is not destroyed while there are
	 * external users alive.
	 */
	struct file *filp;
};

/* DMA buffer export support. */

/* Implementation of wait for exported DMA buffer to be released. */

static void dmabuf_exp_release(struct kref *kref);

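/*
 * Allocate a wait object for the given exported buffer and add it to
 * the wait list. The caller is expected to already hold a reference to
 * @gntdev_dmabuf (see dmabuf_exp_wait_obj_get_dmabuf()); that reference
 * is dropped here, under priv->lock, so dmabuf_exp_release() can fire
 * and signal the completion once the last user goes away.
 */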
static struct gntdev_dmabuf_wait_obj *
dmabuf_exp_wait_obj_new(struct gntdev_dmabuf_priv *priv,
			struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	init_completion(&obj->completion);
	obj->gntdev_dmabuf = gntdev_dmabuf;

	mutex_lock(&priv->lock);
	list_add(&obj->next, &priv->exp_wait_list);
	/* Put our reference and wait for gntdev_dmabuf's release to fire. */
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
	return obj;
}

static void dmabuf_exp_wait_obj_free(struct gntdev_dmabuf_priv *priv,
				     struct gntdev_dmabuf_wait_obj *obj)
{
	mutex_lock(&priv->lock);
	list_del(&obj->next);
	mutex_unlock(&priv->lock);
	kfree(obj);
}

static int dmabuf_exp_wait_obj_wait(struct gntdev_dmabuf_wait_obj *obj,
				    u32 wait_to_ms)
{
	if (wait_for_completion_timeout(&obj->completion,
			msecs_to_jiffies(wait_to_ms)) <= 0)
		return -ETIMEDOUT;

	return 0;
}

static void dmabuf_exp_wait_obj_signal(struct gntdev_dmabuf_priv *priv,
				       struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	list_for_each_entry(obj, &priv->exp_wait_list, next)
		if (obj->gntdev_dmabuf == gntdev_dmabuf) {
			pr_debug("Found gntdev_dmabuf in the wait list, wake\n");
			complete_all(&obj->completion);
			break;
		}
}

static struct gntdev_dmabuf *
dmabuf_exp_wait_obj_get_dmabuf(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry(gntdev_dmabuf, &priv->exp_list, next)
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the export list\n");
			kref_get(&gntdev_dmabuf->u.exp.refcount);
			ret = gntdev_dmabuf;
			break;
		}
	mutex_unlock(&priv->lock);
	return ret;
}

static int dmabuf_exp_wait_released(struct gntdev_dmabuf_priv *priv, int fd,
				    int wait_to_ms)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct gntdev_dmabuf_wait_obj *obj;
	int ret;

	pr_debug("Will wait for dma-buf with fd %d\n", fd);
	/*
	 * Try to find the DMA buffer: if not found, it means that either
	 * the buffer has already been released or the provided file
	 * descriptor is wrong.
	 */
	gntdev_dmabuf = dmabuf_exp_wait_obj_get_dmabuf(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	/*
	 * gntdev_dmabuf still exists and its reference count is held by us
	 * now, so prepare to wait: allocate a wait object and add it to the
	 * wait list, so we can find it on release.
	 */
	obj = dmabuf_exp_wait_obj_new(priv, gntdev_dmabuf);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = dmabuf_exp_wait_obj_wait(obj, wait_to_ms);
	dmabuf_exp_wait_obj_free(priv, obj);
	return ret;
}
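
/*
 * A minimal user-space sketch of the wait flow above, assuming the UAPI
 * from include/uapi/xen/gntdev.h; error handling is omitted. Here
 * "gntdev_fd" is an open handle to the gntdev character device and
 * "dmabuf_fd" is the file descriptor returned by the export ioctl:
 *
 *	struct ioctl_gntdev_dmabuf_exp_wait_released wait = {
 *		.fd = dmabuf_fd,
 *		.wait_to_ms = 1000,
 *	};
 *
 *	close(dmabuf_fd);
 *	ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED, &wait);
 *
 * -ENOENT means the buffer has already been released, -ETIMEDOUT that
 * it is still in use after wait_to_ms milliseconds.
 */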

/* DMA buffer export support. */

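/*
 * Wrap an array of pages into a newly allocated sg_table;
 * sg_alloc_table_from_pages() merges physically contiguous pages into
 * single scatterlist entries where possible.
 */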
static struct sg_table *
dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sgt;
	int ret;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
					nr_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return ERR_PTR(ret);
}

static int dmabuf_exp_ops_attach(struct dma_buf *dma_buf,
				 struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach;

	gntdev_dmabuf_attach = kzalloc(sizeof(*gntdev_dmabuf_attach),
				       GFP_KERNEL);
	if (!gntdev_dmabuf_attach)
		return -ENOMEM;

	gntdev_dmabuf_attach->dir = DMA_NONE;
	attach->priv = gntdev_dmabuf_attach;
	return 0;
}

static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
				  struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;

	if (gntdev_dmabuf_attach) {
		struct sg_table *sgt = gntdev_dmabuf_attach->sgt;

		if (sgt) {
			if (gntdev_dmabuf_attach->dir != DMA_NONE)
				dma_unmap_sgtable(attach->dev, sgt,
						  gntdev_dmabuf_attach->dir,
						  DMA_ATTR_SKIP_CPU_SYNC);
			sg_free_table(sgt);
		}

		kfree(sgt);
		kfree(gntdev_dmabuf_attach);
		attach->priv = NULL;
	}
}

static struct sg_table *
dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
			   enum dma_data_direction dir)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
	struct gntdev_dmabuf *gntdev_dmabuf = attach->dmabuf->priv;
	struct sg_table *sgt;

	pr_debug("Mapping %d pages for dev %p\n", gntdev_dmabuf->nr_pages,
		 attach->dev);

	if (dir == DMA_NONE || !gntdev_dmabuf_attach)
		return ERR_PTR(-EINVAL);

	/* Return the cached mapping when possible. */
	if (gntdev_dmabuf_attach->dir == dir)
		return gntdev_dmabuf_attach->sgt;

	/*
	 * Two mappings with different directions for the same attachment are
	 * not allowed.
	 */
	if (gntdev_dmabuf_attach->dir != DMA_NONE)
		return ERR_PTR(-EBUSY);

	sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
				  gntdev_dmabuf->nr_pages);
	if (!IS_ERR(sgt)) {
		if (dma_map_sgtable(attach->dev, sgt, dir,
				    DMA_ATTR_SKIP_CPU_SYNC)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			gntdev_dmabuf_attach->sgt = sgt;
			gntdev_dmabuf_attach->dir = dir;
		}
	}
	if (IS_ERR(sgt))
		pr_debug("Failed to map sg table for dev %p\n", attach->dev);
	return sgt;
}

static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach,
					 struct sg_table *sgt,
					 enum dma_data_direction dir)
{
	/* Not implemented. The unmap is done in dmabuf_exp_ops_detach(). */
}

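/*
 * Last-reference callback for an exported buffer, called with
 * priv->lock held: wake any waiters, unlink the buffer from the export
 * list and drop the reference on the gntdev file taken at export time.
 */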
static void dmabuf_exp_release(struct kref *kref)
{
	struct gntdev_dmabuf *gntdev_dmabuf =
		container_of(kref, struct gntdev_dmabuf, u.exp.refcount);

	dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
	list_del(&gntdev_dmabuf->next);
	fput(gntdev_dmabuf->priv->filp);
	kfree(gntdev_dmabuf);
}

static void dmabuf_exp_remove_map(struct gntdev_priv *priv,
				  struct gntdev_grant_map *map)
{
	mutex_lock(&priv->lock);
	list_del(&map->next);
	gntdev_put_map(NULL /* already removed */, map);
	mutex_unlock(&priv->lock);
}

static void dmabuf_exp_ops_release(struct dma_buf *dma_buf)
{
	struct gntdev_dmabuf *gntdev_dmabuf = dma_buf->priv;
	struct gntdev_dmabuf_priv *priv = gntdev_dmabuf->priv;

	dmabuf_exp_remove_map(gntdev_dmabuf->u.exp.priv,
			      gntdev_dmabuf->u.exp.map);
	mutex_lock(&priv->lock);
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
}

static const struct dma_buf_ops dmabuf_exp_ops = {
	.attach = dmabuf_exp_ops_attach,
	.detach = dmabuf_exp_ops_detach,
	.map_dma_buf = dmabuf_exp_ops_map_dma_buf,
	.unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
	.release = dmabuf_exp_ops_release,
};

struct gntdev_dmabuf_export_args {
	struct gntdev_priv *priv;
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_priv *dmabuf_priv;
	struct device *dev;
	int count;
	struct page **pages;
	u32 fd;
};

static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct gntdev_dmabuf *gntdev_dmabuf;
	int ret;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		return -ENOMEM;

	kref_init(&gntdev_dmabuf->u.exp.refcount);

	gntdev_dmabuf->priv = args->dmabuf_priv;
	gntdev_dmabuf->nr_pages = args->count;
	gntdev_dmabuf->pages = args->pages;
	gntdev_dmabuf->u.exp.priv = args->priv;
	gntdev_dmabuf->u.exp.map = args->map;

	exp_info.exp_name = KBUILD_MODNAME;
	if (args->dev->driver && args->dev->driver->owner)
		exp_info.owner = args->dev->driver->owner;
	else
		exp_info.owner = THIS_MODULE;
	exp_info.ops = &dmabuf_exp_ops;
	exp_info.size = args->count << PAGE_SHIFT;
	exp_info.flags = O_RDWR;
	exp_info.priv = gntdev_dmabuf;

	gntdev_dmabuf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(gntdev_dmabuf->dmabuf)) {
		ret = PTR_ERR(gntdev_dmabuf->dmabuf);
		gntdev_dmabuf->dmabuf = NULL;
		goto fail;
	}

	ret = dma_buf_fd(gntdev_dmabuf->dmabuf, O_CLOEXEC);
	if (ret < 0)
		goto fail;

	gntdev_dmabuf->fd = ret;
	args->fd = ret;

	pr_debug("Exporting DMA buffer with fd %d\n", ret);

	mutex_lock(&args->dmabuf_priv->lock);
	list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
	mutex_unlock(&args->dmabuf_priv->lock);
	get_file(gntdev_dmabuf->priv->filp);
	return 0;

fail:
	if (gntdev_dmabuf->dmabuf)
		dma_buf_put(gntdev_dmabuf->dmabuf);
	kfree(gntdev_dmabuf);
	return ret;
}

static struct gntdev_grant_map *
dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
				 int count)
{
	struct gntdev_grant_map *map;

	if (unlikely(gntdev_test_page_count(count)))
		return ERR_PTR(-EINVAL);

	if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
	    (dmabuf_flags & GNTDEV_DMA_FLAG_COHERENT)) {
		pr_debug("Wrong dma-buf flags: 0x%x\n", dmabuf_flags);
		return ERR_PTR(-EINVAL);
	}

	map = gntdev_alloc_map(priv, count, dmabuf_flags);
	if (!map)
		return ERR_PTR(-ENOMEM);

	return map;
}

static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
				int count, u32 domid, u32 *refs, u32 *fd)
{
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_export_args args;
	int i, ret;

	map = dmabuf_exp_alloc_backing_storage(priv, flags, count);
	if (IS_ERR(map))
		return PTR_ERR(map);

	for (i = 0; i < count; i++) {
		map->grants[i].domid = domid;
		map->grants[i].ref = refs[i];
	}

	mutex_lock(&priv->lock);
	gntdev_add_map(priv, map);
	mutex_unlock(&priv->lock);

	map->flags |= GNTMAP_host_map;
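	/*
	 * On x86 additionally request a device mapping, so the grant is
	 * also set up by the hypervisor for device (bus address) access.
	 */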
#if defined(CONFIG_X86)
	map->flags |= GNTMAP_device_map;
#endif

	ret = gntdev_map_grant_pages(map);
	if (ret < 0)
		goto out;

	args.priv = priv;
	args.map = map;
	args.dev = priv->dma_dev;
	args.dmabuf_priv = priv->dmabuf_priv;
	args.count = map->count;
	args.pages = map->pages;
	args.fd = -1; /* Shut up unnecessary gcc warning for i386 */

	ret = dmabuf_exp_from_pages(&args);
	if (ret < 0)
		goto out;

	*fd = args.fd;
	return 0;

out:
	dmabuf_exp_remove_map(priv, map);
	return ret;
}
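
/*
 * A minimal user-space sketch of the export flow, assuming the UAPI
 * from include/uapi/xen/gntdev.h; a single grant reference is shown, so
 * the refs[] array embedded in the struct is large enough (for bigger
 * counts user space must allocate extra room past the struct). Error
 * handling is omitted:
 *
 *	struct ioctl_gntdev_dmabuf_exp_from_refs op = {
 *		.flags = 0,
 *		.count = 1,
 *		.domid = remote_domid,
 *		.refs = { ref },
 *	};
 *
 *	ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS, &op);
 *
 * On success op.fd holds the file descriptor of the new dma-buf.
 */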

/* DMA buffer import support. */

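/*
 * Grant the foreign domain @domid access to each page of the imported
 * buffer, storing the allocated grant references in @refs[]. On failure
 * the not-yet-claimed references are returned to the grant table; ending
 * access for the ones already granted is left to the caller (see
 * dmabuf_imp_end_foreign_access()).
 */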
static int
dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
				int count, int domid)
{
	grant_ref_t priv_gref_head;
	int i, ret;

	ret = gnttab_alloc_grant_references(count, &priv_gref_head);
	if (ret < 0) {
		pr_debug("Cannot allocate grant references, ret %d\n", ret);
		return ret;
	}

	for (i = 0; i < count; i++) {
		int cur_ref;

		cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
		if (cur_ref < 0) {
			ret = cur_ref;
			pr_debug("Cannot claim grant reference, ret %d\n", ret);
			goto out;
		}

		gnttab_grant_foreign_access_ref(cur_ref, domid,
						xen_page_to_gfn(pages[i]), 0);
		refs[i] = cur_ref;
	}

	return 0;

out:
	gnttab_free_grant_references(priv_gref_head);
	return ret;
}

static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (refs[i] != GRANT_INVALID_REF)
			gnttab_end_foreign_access(refs[i], 0, 0UL);
}

static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
{
	kfree(gntdev_dmabuf->pages);
	kfree(gntdev_dmabuf->u.imp.refs);
	kfree(gntdev_dmabuf);
}

static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	int i;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		goto fail_no_free;

	gntdev_dmabuf->u.imp.refs = kcalloc(count,
					    sizeof(gntdev_dmabuf->u.imp.refs[0]),
					    GFP_KERNEL);
	if (!gntdev_dmabuf->u.imp.refs)
		goto fail;

	gntdev_dmabuf->pages = kcalloc(count,
				       sizeof(gntdev_dmabuf->pages[0]),
				       GFP_KERNEL);
	if (!gntdev_dmabuf->pages)
		goto fail;

	gntdev_dmabuf->nr_pages = count;

	for (i = 0; i < count; i++)
		gntdev_dmabuf->u.imp.refs[i] = GRANT_INVALID_REF;

	return gntdev_dmabuf;

fail:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_no_free:
	return ERR_PTR(-ENOMEM);
}

static struct gntdev_dmabuf *
dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
		   int fd, int count, int domid)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret;
	struct dma_buf *dma_buf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct sg_page_iter sg_iter;
	int i;

	dma_buf = dma_buf_get(fd);
	if (IS_ERR(dma_buf))
		return ERR_CAST(dma_buf);

	gntdev_dmabuf = dmabuf_imp_alloc_storage(count);
	if (IS_ERR(gntdev_dmabuf)) {
		ret = gntdev_dmabuf;
		goto fail_put;
	}

	gntdev_dmabuf->priv = priv;
	gntdev_dmabuf->fd = fd;

	attach = dma_buf_attach(dma_buf, dev);
	if (IS_ERR(attach)) {
		ret = ERR_CAST(attach);
		goto fail_free_obj;
	}

	gntdev_dmabuf->u.imp.attach = attach;

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = ERR_CAST(sgt);
		goto fail_detach;
	}

	/* Check that we have zero offset. */
	if (sgt->sgl->offset) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n",
			 sgt->sgl->offset);
		goto fail_unmap;
	}

	/* Check the number of pages that the imported buffer has. */
	if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %zu bytes, user-space expects %d pages\n",
			 attach->dmabuf->size, gntdev_dmabuf->nr_pages);
		goto fail_unmap;
	}

	gntdev_dmabuf->u.imp.sgt = sgt;

	/* Now convert sgt to array of pages and check for page validity. */
	i = 0;
	for_each_sgtable_page(sgt, &sg_iter, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);
		/*
		 * Check if the page is valid: an invalid page can be seen
		 * if we are given a page from VRAM or another resource
		 * which is not backed by a struct page.
		 */
		if (!pfn_valid(page_to_pfn(page))) {
			ret = ERR_PTR(-EINVAL);
			goto fail_unmap;
		}

		gntdev_dmabuf->pages[i++] = page;
	}

	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
						      gntdev_dmabuf->u.imp.refs,
						      count, domid));
	if (IS_ERR(ret))
		goto fail_end_access;

	pr_debug("Imported DMA buffer with fd %d\n", fd);

	mutex_lock(&priv->lock);
	list_add(&gntdev_dmabuf->next, &priv->imp_list);
	mutex_unlock(&priv->lock);

	return gntdev_dmabuf;

fail_end_access:
	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs, count);
fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
fail_free_obj:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_put:
	dma_buf_put(dma_buf);
	return ret;
}
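
/*
 * A minimal user-space sketch of the import flow, assuming the UAPI
 * from include/uapi/xen/gntdev.h; a single page is shown and error
 * handling is omitted. Here "dmabuf_fd" is a dma-buf fd obtained from
 * some exporting driver and "exporting_domid" is the remote domain that
 * will be granted access to the pages:
 *
 *	struct ioctl_gntdev_dmabuf_imp_to_refs op = {
 *		.fd = dmabuf_fd,
 *		.count = 1,
 *		.domid = exporting_domid,
 *	};
 *
 *	ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_IMP_TO_REFS, &op);
 *
 * On success op.refs[] holds the grant references to pass to the remote
 * domain.
 */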

/*
 * Find the imported dma-buf by its file descriptor and remove it from
 * the list of imported buffers.
 */
static struct gntdev_dmabuf *
dmabuf_imp_find_unlink(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) {
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the import list\n");
			ret = gntdev_dmabuf;
			list_del(&gntdev_dmabuf->next);
			break;
		}
	}
	mutex_unlock(&priv->lock);
	return ret;
}

static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	gntdev_dmabuf = dmabuf_imp_find_unlink(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	pr_debug("Releasing DMA buffer with fd %d\n", fd);

	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs,
				      gntdev_dmabuf->nr_pages);

	attach = gntdev_dmabuf->u.imp.attach;

	if (gntdev_dmabuf->u.imp.sgt)
		dma_buf_unmap_attachment(attach, gntdev_dmabuf->u.imp.sgt,
					 DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	dma_buf_put(dma_buf);

	dmabuf_imp_free_storage(gntdev_dmabuf);
	return 0;
}

static void dmabuf_imp_release_all(struct gntdev_dmabuf_priv *priv)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf;

	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next)
		dmabuf_imp_release(priv, gntdev_dmabuf->fd);
}

/* DMA buffer IOCTL support. */

long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
				       struct ioctl_gntdev_dmabuf_exp_from_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_from_refs op;
	u32 *refs;
	long ret;

	if (use_ptemod) {
		pr_debug("Cannot provide dma-buf: use_ptemod %d\n",
			 use_ptemod);
		return -EINVAL;
	}

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
	if (!refs)
		return -ENOMEM;

	if (copy_from_user(refs, u->refs, sizeof(*refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out;
	}

	ret = dmabuf_exp_from_refs(priv, op.flags, op.count,
				   op.domid, refs, &op.fd);
	if (ret)
		goto out;

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		ret = -EFAULT;

out:
	kfree(refs);
	return ret;
}

long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
					   struct ioctl_gntdev_dmabuf_exp_wait_released __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_wait_released op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_exp_wait_released(priv->dmabuf_priv, op.fd,
					op.wait_to_ms);
}

long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_to_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_to_refs op;
	struct gntdev_dmabuf *gntdev_dmabuf;
	long ret;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv,
					   priv->dma_dev, op.fd,
					   op.count, op.domid);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	if (copy_to_user(u->refs, gntdev_dmabuf->u.imp.refs,
			 sizeof(*u->refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out_release;
	}
	return 0;

out_release:
	dmabuf_imp_release(priv->dmabuf_priv, op.fd);
	return ret;
}

long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_release __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_release op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
}

struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp)
{
	struct gntdev_dmabuf_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	mutex_init(&priv->lock);
	INIT_LIST_HEAD(&priv->exp_list);
	INIT_LIST_HEAD(&priv->exp_wait_list);
	INIT_LIST_HEAD(&priv->imp_list);

	priv->filp = filp;

	return priv;
}

void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
{
	dmabuf_imp_release_all(priv);
	kfree(priv);
}