/******************************************************************************
 * gntdev.c
 *
 * Device for accessing (in user-space) pages that have been granted by other
 * domains.
 *
 * Copyright (c) 2006-2007, D G Murray.
 *           (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
 *           (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#undef DEBUG

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/refcount.h>

#include <xen/xen.h>
#include <xen/grant_table.h>
#include <xen/balloon.h>
#include <xen/gntdev.h>
#include <xen/events.h>
#include <xen/page.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include "gntdev-common.h"
#ifdef CONFIG_XEN_GNTDEV_DMABUF
#include "gntdev-dmabuf.h"
#endif

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
	      "Gerd Hoffmann <kraxel@redhat.com>");
MODULE_DESCRIPTION("User-space granted page access driver");

static unsigned int limit = 64*1024;
module_param(limit, uint, 0644);
MODULE_PARM_DESC(limit,
	"Maximum number of grants that may be mapped by one mapping request");

static int use_ptemod;

static int unmap_grant_pages(struct gntdev_grant_map *map,
			     int offset, int pages);

static struct miscdevice gntdev_miscdev;

/* ------------------------------------------------------------------ */

bool gntdev_test_page_count(unsigned int count)
{
	return !count || count > limit;
}
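
/*
 * Note the inverted sense: gntdev_test_page_count() returns true when
 * @count is *invalid*, i.e. zero or larger than the "limit" module
 * parameter.  The ioctl handlers below use it as a guard and reject
 * such requests with -EINVAL.
 */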

static void gntdev_print_maps(struct gntdev_priv *priv,
			      char *text, int text_index)
{
#ifdef DEBUG
	struct gntdev_grant_map *map;

	pr_debug("%s: maps list (priv %p)\n", __func__, priv);
	list_for_each_entry(map, &priv->maps, next)
		pr_debug("  index %2d, count %2d %s\n",
		       map->index, map->count,
		       map->index == text_index && text ? text : "");
#endif
}

static void gntdev_free_map(struct gntdev_grant_map *map)
{
	if (map == NULL)
		return;

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
	if (map->dma_vaddr) {
		struct gnttab_dma_alloc_args args;

		args.dev = map->dma_dev;
		args.coherent = !!(map->dma_flags & GNTDEV_DMA_FLAG_COHERENT);
		args.nr_pages = map->count;
		args.pages = map->pages;
		args.frames = map->frames;
		args.vaddr = map->dma_vaddr;
		args.dev_bus_addr = map->dma_bus_addr;

		gnttab_dma_free_pages(&args);
	} else
#endif
	if (map->pages)
		gnttab_free_pages(map->count, map->pages);

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
	kvfree(map->frames);
#endif
	kvfree(map->pages);
	kvfree(map->grants);
	kvfree(map->map_ops);
	kvfree(map->unmap_ops);
	kvfree(map->kmap_ops);
	kvfree(map->kunmap_ops);
	kfree(map);
}

struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
					  int dma_flags)
{
	struct gntdev_grant_map *add;
	int i;

	add = kzalloc(sizeof(*add), GFP_KERNEL);
	if (!add)
		return NULL;

	add->grants = kvcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
	add->map_ops = kvcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
	add->unmap_ops = kvcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
	add->kmap_ops = kvcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
	add->kunmap_ops = kvcalloc(count,
				   sizeof(add->kunmap_ops[0]), GFP_KERNEL);
	add->pages = kvcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
	if (!add->grants ||
	    !add->map_ops ||
	    !add->unmap_ops ||
	    !add->kmap_ops ||
	    !add->kunmap_ops ||
	    !add->pages)
		goto err;

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
	add->dma_flags = dma_flags;

	/*
	 * Check if this mapping is requested to be backed
	 * by a DMA buffer.
	 */
	if (dma_flags & (GNTDEV_DMA_FLAG_WC | GNTDEV_DMA_FLAG_COHERENT)) {
		struct gnttab_dma_alloc_args args;

		add->frames = kvcalloc(count, sizeof(add->frames[0]),
				       GFP_KERNEL);
		if (!add->frames)
			goto err;

		/* Remember the device, so we can free DMA memory. */
		add->dma_dev = priv->dma_dev;

		args.dev = priv->dma_dev;
		args.coherent = !!(dma_flags & GNTDEV_DMA_FLAG_COHERENT);
		args.nr_pages = count;
		args.pages = add->pages;
		args.frames = add->frames;

		if (gnttab_dma_alloc_pages(&args))
			goto err;

		add->dma_vaddr = args.vaddr;
		add->dma_bus_addr = args.dev_bus_addr;
	} else
#endif
	if (gnttab_alloc_pages(count, add->pages))
		goto err;

	for (i = 0; i < count; i++) {
		add->map_ops[i].handle = -1;
		add->unmap_ops[i].handle = -1;
		add->kmap_ops[i].handle = -1;
		add->kunmap_ops[i].handle = -1;
	}

	add->index = 0;
	add->count = count;
	refcount_set(&add->users, 1);

	return add;

err:
	gntdev_free_map(add);
	return NULL;
}
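
/*
 * Typical lifecycle of a grant map, as used by the ioctl paths below:
 * gntdev_alloc_map() creates the map with a user count of 1,
 * gntdev_add_map() assigns it an index range and links it into
 * priv->maps, and gntdev_put_map() drops the final reference and frees
 * everything via gntdev_free_map().
 */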

void gntdev_add_map(struct gntdev_priv *priv, struct gntdev_grant_map *add)
{
	struct gntdev_grant_map *map;

	list_for_each_entry(map, &priv->maps, next) {
		if (add->index + add->count < map->index) {
			list_add_tail(&add->next, &map->next);
			goto done;
		}
		add->index = map->index + map->count;
	}
	list_add_tail(&add->next, &priv->maps);

done:
	gntdev_print_maps(priv, "[new]", add->index);
}

static struct gntdev_grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
						      int index, int count)
{
	struct gntdev_grant_map *map;

	list_for_each_entry(map, &priv->maps, next) {
		if (map->index != index)
			continue;
		if (count && map->count != count)
			continue;
		return map;
	}
	return NULL;
}

void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
{
	if (!map)
		return;

	if (!refcount_dec_and_test(&map->users))
		return;

	if (map->pages && !use_ptemod)
		unmap_grant_pages(map, 0, map->count);

	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
		notify_remote_via_evtchn(map->notify.event);
		evtchn_put(map->notify.event);
	}
	gntdev_free_map(map);
}

/* ------------------------------------------------------------------ */

static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
{
	struct gntdev_grant_map *map = data;
	unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
	int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
	u64 pte_maddr;

	BUG_ON(pgnr >= map->count);
	pte_maddr = arbitrary_virt_to_machine(pte).maddr;

	/*
	 * Set the PTE as special to force get_user_pages_fast() to fall
	 * back to the slow path. If this is not supported as part of
	 * the grant map, it will be done afterwards.
	 */
	if (xen_feature(XENFEAT_gnttab_map_avail_bits))
		flags |= (1 << _GNTMAP_guest_avail0);

	gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
			  map->grants[pgnr].ref,
			  map->grants[pgnr].domid);
	gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
			    -1 /* handle */);
	return 0;
}

#ifdef CONFIG_X86
static int set_grant_ptes_as_special(pte_t *pte, unsigned long addr, void *data)
{
	set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte));
	return 0;
}
#endif
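
/*
 * Pairs with the comment in find_grant_ptes(): when the hypervisor
 * cannot set the special bit for us as part of the grant map (no
 * XENFEAT_gnttab_map_avail_bits), the PTEs are marked special in a
 * separate pass using the helper above, again so that
 * get_user_pages_fast() falls back to the slow path.
 */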

int gntdev_map_grant_pages(struct gntdev_grant_map *map)
{
	int i, err = 0;

	if (!use_ptemod) {
		/* Note: it could already be mapped */
		if (map->map_ops[0].handle != -1)
			return 0;
		for (i = 0; i < map->count; i++) {
			unsigned long addr = (unsigned long)
				pfn_to_kaddr(page_to_pfn(map->pages[i]));
			gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
					  map->grants[i].ref,
					  map->grants[i].domid);
			gnttab_set_unmap_op(&map->unmap_ops[i], addr,
					    map->flags, -1 /* handle */);
		}
	} else {
		/*
		 * Set up the map_ops corresponding to the pte entries pointing
		 * to the kernel linear addresses of the struct pages.
		 * These ptes are completely different from the user ptes dealt
		 * with by find_grant_ptes().
		 * Note that GNTMAP_device_map isn't needed here: The
		 * dev_bus_addr output field gets consumed only from ->map_ops,
		 * and by not requesting it when mapping we also avoid needing
		 * to mirror dev_bus_addr into ->unmap_ops (and holding an extra
		 * reference to the page in the hypervisor).
		 */
		unsigned int flags = (map->flags & ~GNTMAP_device_map) |
				     GNTMAP_host_map;

		for (i = 0; i < map->count; i++) {
			unsigned long address = (unsigned long)
				pfn_to_kaddr(page_to_pfn(map->pages[i]));
			BUG_ON(PageHighMem(map->pages[i]));

			gnttab_set_map_op(&map->kmap_ops[i], address, flags,
					  map->grants[i].ref,
					  map->grants[i].domid);
			gnttab_set_unmap_op(&map->kunmap_ops[i], address,
					    flags, -1);
		}
	}

	pr_debug("map %d+%d\n", map->index, map->count);
	err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
			map->pages, map->count);

	for (i = 0; i < map->count; i++) {
		if (map->map_ops[i].status == GNTST_okay)
			map->unmap_ops[i].handle = map->map_ops[i].handle;
		else if (!err)
			err = -EINVAL;

		if (map->flags & GNTMAP_device_map)
			map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr;

		if (use_ptemod) {
			if (map->kmap_ops[i].status == GNTST_okay)
				map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
			else if (!err)
				err = -EINVAL;
		}
	}
	return err;
}
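
/*
 * Note the error convention above: gntdev_map_grant_pages() returns
 * -EINVAL if any individual grant failed to map, but handles for the
 * successful entries are still recorded in the unmap ops, so a later
 * unmap_grant_pages() can skip the holes (handle == -1) and release
 * only what was actually mapped.
 */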

static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
			       int pages)
{
	int i, err = 0;
	struct gntab_unmap_queue_data unmap_data;

	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
		int pgno = (map->notify.addr >> PAGE_SHIFT);
		if (pgno >= offset && pgno < offset + pages) {
			/* No need for kmap, pages are in lowmem */
			uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
			tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
		}
	}

	unmap_data.unmap_ops = map->unmap_ops + offset;
	unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
	unmap_data.pages = map->pages + offset;
	unmap_data.count = pages;

	err = gnttab_unmap_refs_sync(&unmap_data);
	if (err)
		return err;

	for (i = 0; i < pages; i++) {
		if (map->unmap_ops[offset+i].status)
			err = -EINVAL;
		pr_debug("unmap handle=%d st=%d\n",
			map->unmap_ops[offset+i].handle,
			map->unmap_ops[offset+i].status);
		map->unmap_ops[offset+i].handle = -1;
	}
	return err;
}

static int unmap_grant_pages(struct gntdev_grant_map *map, int offset,
			     int pages)
{
	int range, err = 0;

	pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);

	/* It is possible the requested range will have a "hole" where we
	 * already unmapped some of the grants. Only unmap valid ranges.
	 */
	while (pages && !err) {
		while (pages && map->unmap_ops[offset].handle == -1) {
			offset++;
			pages--;
		}
		range = 0;
		while (range < pages) {
			if (map->unmap_ops[offset+range].handle == -1)
				break;
			range++;
		}
		err = __unmap_grant_pages(map, offset, range);
		offset += range;
		pages -= range;
	}

	return err;
}
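
/*
 * Worked example for the hole-skipping loop above: with unmap handles
 * [H0, -1, H2, H3] and a request for offset 0, pages 4, the first
 * inner loop skips nothing and range stops at 1 (handle -1), so page 0
 * is unmapped; the next iteration skips index 1 and unmaps pages 2-3
 * in a second __unmap_grant_pages() call.
 */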

/* ------------------------------------------------------------------ */

static void gntdev_vma_open(struct vm_area_struct *vma)
{
	struct gntdev_grant_map *map = vma->vm_private_data;

	pr_debug("gntdev_vma_open %p\n", vma);
	refcount_inc(&map->users);
}

static void gntdev_vma_close(struct vm_area_struct *vma)
{
	struct gntdev_grant_map *map = vma->vm_private_data;
	struct file *file = vma->vm_file;
	struct gntdev_priv *priv = file->private_data;

	pr_debug("gntdev_vma_close %p\n", vma);
	if (use_ptemod) {
		WARN_ON(map->vma != vma);
		mmu_interval_notifier_remove(&map->notifier);
		map->vma = NULL;
	}
	vma->vm_private_data = NULL;
	gntdev_put_map(priv, map);
}

static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
						 unsigned long addr)
{
	struct gntdev_grant_map *map = vma->vm_private_data;

	return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT];
}

static const struct vm_operations_struct gntdev_vmops = {
	.open = gntdev_vma_open,
	.close = gntdev_vma_close,
	.find_special_page = gntdev_vma_find_special_page,
};
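
/*
 * gntdev_vma_open()/gntdev_vma_close() keep the map's user count in
 * step with the number of VMAs referring to it (e.g. across fork() or
 * a VMA split), so the grant map cannot be freed while any mapping of
 * it is still live.  .find_special_page lets the core mm resolve the
 * special PTEs installed for this VMA back to the underlying granted
 * pages.
 */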

/* ------------------------------------------------------------------ */

static bool gntdev_invalidate(struct mmu_interval_notifier *mn,
			      const struct mmu_notifier_range *range,
			      unsigned long cur_seq)
{
	struct gntdev_grant_map *map =
		container_of(mn, struct gntdev_grant_map, notifier);
	unsigned long mstart, mend;
	int err;

	if (!mmu_notifier_range_blockable(range))
		return false;

	/*
	 * If the VMA is split or otherwise changed the notifier is not
	 * updated, but we don't want to process VA's outside the modified
	 * VMA. FIXME: It would be much more understandable to just prevent
	 * modifying the VMA in the first place.
	 */
	if (map->vma->vm_start >= range->end ||
	    map->vma->vm_end <= range->start)
		return true;

	mstart = max(range->start, map->vma->vm_start);
	mend = min(range->end, map->vma->vm_end);
	pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
			map->index, map->count,
			map->vma->vm_start, map->vma->vm_end,
			range->start, range->end, mstart, mend);
	err = unmap_grant_pages(map,
				(mstart - map->vma->vm_start) >> PAGE_SHIFT,
				(mend - mstart) >> PAGE_SHIFT);
	WARN_ON(err);

	return true;
}

static const struct mmu_interval_notifier_ops gntdev_mmu_ops = {
	.invalidate = gntdev_invalidate,
};

/* ------------------------------------------------------------------ */

static int gntdev_open(struct inode *inode, struct file *flip)
{
	struct gntdev_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->maps);
	mutex_init(&priv->lock);

#ifdef CONFIG_XEN_GNTDEV_DMABUF
	priv->dmabuf_priv = gntdev_dmabuf_init(flip);
	if (IS_ERR(priv->dmabuf_priv)) {
		int ret = PTR_ERR(priv->dmabuf_priv);

		kfree(priv);
		return ret;
	}
#endif

	flip->private_data = priv;
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
	priv->dma_dev = gntdev_miscdev.this_device;
	dma_coerce_mask_and_coherent(priv->dma_dev, DMA_BIT_MASK(64));
#endif
	pr_debug("priv %p\n", priv);

	return 0;
}

static int gntdev_release(struct inode *inode, struct file *flip)
{
	struct gntdev_priv *priv = flip->private_data;
	struct gntdev_grant_map *map;

	pr_debug("priv %p\n", priv);

	mutex_lock(&priv->lock);
	while (!list_empty(&priv->maps)) {
		map = list_entry(priv->maps.next,
				 struct gntdev_grant_map, next);
		list_del(&map->next);
		gntdev_put_map(NULL /* already removed */, map);
	}
	mutex_unlock(&priv->lock);

#ifdef CONFIG_XEN_GNTDEV_DMABUF
	gntdev_dmabuf_fini(priv->dmabuf_priv);
#endif

	kfree(priv);
	return 0;
}

static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
				       struct ioctl_gntdev_map_grant_ref __user *u)
{
	struct ioctl_gntdev_map_grant_ref op;
	struct gntdev_grant_map *map;
	int err;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, add %d\n", priv, op.count);
	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	err = -ENOMEM;
	map = gntdev_alloc_map(priv, op.count, 0 /* This is not a dma-buf. */);
	if (!map)
		return err;

	if (copy_from_user(map->grants, &u->refs,
			   sizeof(map->grants[0]) * op.count) != 0) {
		gntdev_put_map(NULL, map);
		return -EFAULT;
	}

	mutex_lock(&priv->lock);
	gntdev_add_map(priv, map);
	op.index = map->index << PAGE_SHIFT;
	mutex_unlock(&priv->lock);

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		return -EFAULT;

	return 0;
}
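
/*
 * For reference, a minimal user-space sketch of this ioctl (assuming
 * the uapi definitions from <xen/gntdev.h>, a gref granted by
 * remote_domid, and an open fd on /dev/xen/gntdev; error handling
 * omitted):
 *
 *	struct ioctl_gntdev_map_grant_ref op;
 *
 *	op.count = 1;
 *	op.refs[0].domid = remote_domid;
 *	op.refs[0].ref = gref;
 *	ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &op);
 *	addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, op.index);
 *
 * op.index, filled in above, is the pseudo-offset to pass to mmap().
 */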

static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
					 struct ioctl_gntdev_unmap_grant_ref __user *u)
{
	struct ioctl_gntdev_unmap_grant_ref op;
	struct gntdev_grant_map *map;
	int err = -ENOENT;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);

	mutex_lock(&priv->lock);
	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
	if (map) {
		list_del(&map->next);
		err = 0;
	}
	mutex_unlock(&priv->lock);
	if (map)
		gntdev_put_map(priv, map);
	return err;
}

static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
					      struct ioctl_gntdev_get_offset_for_vaddr __user *u)
{
	struct ioctl_gntdev_get_offset_for_vaddr op;
	struct vm_area_struct *vma;
	struct gntdev_grant_map *map;
	int rv = -EINVAL;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);

	mmap_read_lock(current->mm);
	vma = find_vma(current->mm, op.vaddr);
	if (!vma || vma->vm_ops != &gntdev_vmops)
		goto out_unlock;

	map = vma->vm_private_data;
	if (!map)
		goto out_unlock;

	op.offset = map->index << PAGE_SHIFT;
	op.count = map->count;
	rv = 0;

 out_unlock:
	mmap_read_unlock(current->mm);

	if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
		return -EFAULT;
	return rv;
}
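
/*
 * The inverse of the mmap() step above: given a virtual address inside
 * a gntdev VMA, user space can recover the (offset, count) pair, e.g.
 * (sketch, same uapi assumptions as the earlier example):
 *
 *	struct ioctl_gntdev_get_offset_for_vaddr op;
 *
 *	op.vaddr = (uint64_t)addr;
 *	ioctl(fd, IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR, &op);
 *
 * op.offset and op.count then describe the underlying grant map.
 */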

static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
{
	struct ioctl_gntdev_unmap_notify op;
	struct gntdev_grant_map *map;
	int rc;
	int out_flags;
	evtchn_port_t out_event;

	if (copy_from_user(&op, u, sizeof(op)))
		return -EFAULT;

	if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
		return -EINVAL;

	/* We need to grab a reference to the event channel we are going to use
	 * to send the notify before releasing the reference we may already have
	 * (if someone has called this ioctl twice). This is required so that
	 * it is possible to change the clear_byte part of the notification
	 * without disturbing the event channel part, which may now be the last
	 * reference to that event channel.
	 */
	if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
		if (evtchn_get(op.event_channel_port))
			return -EINVAL;
	}

	out_flags = op.action;
	out_event = op.event_channel_port;

	mutex_lock(&priv->lock);

	list_for_each_entry(map, &priv->maps, next) {
		uint64_t begin = map->index << PAGE_SHIFT;
		uint64_t end = (map->index + map->count) << PAGE_SHIFT;
		if (op.index >= begin && op.index < end)
			goto found;
	}
	rc = -ENOENT;
	goto unlock_out;

 found:
	if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) &&
	    (map->flags & GNTMAP_readonly)) {
		rc = -EINVAL;
		goto unlock_out;
	}

	out_flags = map->notify.flags;
	out_event = map->notify.event;

	map->notify.flags = op.action;
	map->notify.addr = op.index - (map->index << PAGE_SHIFT);
	map->notify.event = op.event_channel_port;

	rc = 0;

 unlock_out:
	mutex_unlock(&priv->lock);

	/* Drop the reference to the event channel we did not save in the map */
	if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
		evtchn_put(out_event);

	return rc;
}
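
/*
 * User-space sketch for the notify ioctl above (same uapi assumptions;
 * the offset names are illustrative): to have the kernel clear a byte
 * of the mapping on unmap, point op.index at that byte within the
 * mapped range:
 *
 *	struct ioctl_gntdev_unmap_notify op;
 *
 *	op.index = mmap_offset + status_byte_offset;
 *	op.action = UNMAP_NOTIFY_CLEAR_BYTE;
 *	ioctl(fd, IOCTL_GNTDEV_SET_UNMAP_NOTIFY, &op);
 *
 * where mmap_offset is the index returned by IOCTL_GNTDEV_MAP_GRANT_REF.
 * UNMAP_NOTIFY_SEND_EVENT works the same way, with
 * op.event_channel_port set to a bound event channel.
 */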

#define GNTDEV_COPY_BATCH 16

struct gntdev_copy_batch {
	struct gnttab_copy ops[GNTDEV_COPY_BATCH];
	struct page *pages[GNTDEV_COPY_BATCH];
	s16 __user *status[GNTDEV_COPY_BATCH];
	unsigned int nr_ops;
	unsigned int nr_pages;
	bool writeable;
};
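
/*
 * Copy requests are coalesced into batches of up to GNTDEV_COPY_BATCH
 * gnttab_copy ops, so that a single grant-table copy hypercall (issued
 * via gnttab_batch_copy() in gntdev_copy()) covers many segments; the
 * pinned source/destination pages are released again once the batch
 * completes.
 */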
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) unsigned long *gfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) unsigned long addr = (unsigned long)virt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) unsigned long xen_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) ret = pin_user_pages_fast(addr, 1, batch->writeable ? FOLL_WRITE : 0, &page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) batch->pages[batch->nr_pages++] = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(addr & ~PAGE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) *gfn = pfn_to_gfn(xen_pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) static void gntdev_put_pages(struct gntdev_copy_batch *batch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) unpin_user_pages_dirty_lock(batch->pages, batch->nr_pages, batch->writeable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) batch->nr_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) batch->writeable = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) static int gntdev_copy(struct gntdev_copy_batch *batch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) gnttab_batch_copy(batch->ops, batch->nr_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) gntdev_put_pages(batch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) * For each completed op, update the status if the op failed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) * and all previous ops for the segment were successful.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) for (i = 0; i < batch->nr_ops; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) s16 status = batch->ops[i].status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) s16 old_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) if (status == GNTST_okay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) if (__get_user(old_status, batch->status[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) if (old_status != GNTST_okay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) if (__put_user(status, batch->status[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) batch->nr_ops = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) struct gntdev_grant_copy_segment *seg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) s16 __user *status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) uint16_t copied = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) * Disallow local -> local copies since there is only space in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) * batch->pages for one page per-op and this would be a very
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) * expensive memcpy().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) if (!(seg->flags & (GNTCOPY_source_gref | GNTCOPY_dest_gref)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) /* Can't cross page if source/dest is a grant ref. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) if (seg->flags & GNTCOPY_source_gref) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) if (seg->source.foreign.offset + seg->len > XEN_PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) if (seg->flags & GNTCOPY_dest_gref) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) if (seg->dest.foreign.offset + seg->len > XEN_PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
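	/*
	 * Optimistically mark the segment as okay; if any op for it fails,
	 * gntdev_copy() replaces this with the first failing status.
	 */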
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) if (put_user(GNTST_okay, status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) while (copied < seg->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) struct gnttab_copy *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) void __user *virt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) size_t len, off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) unsigned long gfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (batch->nr_ops >= GNTDEV_COPY_BATCH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) ret = gntdev_copy(batch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) len = seg->len - copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) op = &batch->ops[batch->nr_ops];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) op->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (seg->flags & GNTCOPY_source_gref) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) op->source.u.ref = seg->source.foreign.ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) op->source.domid = seg->source.foreign.domid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) op->source.offset = seg->source.foreign.offset + copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) op->flags |= GNTCOPY_source_gref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) virt = seg->source.virt + copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) off = (unsigned long)virt & ~XEN_PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) len = min(len, (size_t)XEN_PAGE_SIZE - off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) batch->writeable = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) ret = gntdev_get_page(batch, virt, &gfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) op->source.u.gmfn = gfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) op->source.domid = DOMID_SELF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) op->source.offset = off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) if (seg->flags & GNTCOPY_dest_gref) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) op->dest.u.ref = seg->dest.foreign.ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) op->dest.domid = seg->dest.foreign.domid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) op->dest.offset = seg->dest.foreign.offset + copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) op->flags |= GNTCOPY_dest_gref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) virt = seg->dest.virt + copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) off = (unsigned long)virt & ~XEN_PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) len = min(len, (size_t)XEN_PAGE_SIZE - off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) batch->writeable = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) ret = gntdev_get_page(batch, virt, &gfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) op->dest.u.gmfn = gfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) op->dest.domid = DOMID_SELF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) op->dest.offset = off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) op->len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) copied += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) batch->status[batch->nr_ops] = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) batch->nr_ops++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
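/*
 * Handler for IOCTL_GNTDEV_GRANT_COPY.  Copies the request and each
 * segment in from user space, queues the segments into one shared
 * gntdev_copy_batch, and flushes whatever is still pending after the
 * last segment.  The ioctl return value reports argument or access
 * errors (e.g. -EINVAL, -EFAULT); per-segment grant status lands in
 * each segment's status field.
 *
 * A minimal sketch of the user-space side, for illustration only
 * (ref, domid, buf and fd are placeholders; error handling omitted):
 *
 *	int fd = open("/dev/xen/gntdev", O_RDWR);
 *	struct gntdev_grant_copy_segment seg = {
 *		.flags = GNTCOPY_source_gref,
 *		.source.foreign = { .ref = ref, .domid = domid },
 *		.dest.virt = buf,
 *		.len = XEN_PAGE_SIZE,
 *	};
 *	struct ioctl_gntdev_grant_copy copy = {
 *		.count = 1,
 *		.segments = &seg,
 *	};
 *
 *	err = ioctl(fd, IOCTL_GNTDEV_GRANT_COPY, &copy);
 *	(on success, compare seg.status against GNTST_okay)
 */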
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) static long gntdev_ioctl_grant_copy(struct gntdev_priv *priv, void __user *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) struct ioctl_gntdev_grant_copy copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) struct gntdev_copy_batch batch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) 	if (copy_from_user(&copy, u, sizeof(copy)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) batch.nr_ops = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) batch.nr_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) for (i = 0; i < copy.count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) struct gntdev_grant_copy_segment seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) 		if (copy_from_user(&seg, &copy.segments[i], sizeof(seg))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) 		ret = gntdev_grant_copy_seg(&batch, &seg, &copy.segments[i].status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) if (batch.nr_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) ret = gntdev_copy(&batch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) gntdev_put_pages(&batch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
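/*
 * Top-level ioctl dispatcher for /dev/xen/gntdev.  Commands that are not
 * recognized (including the dma-buf ones when CONFIG_XEN_GNTDEV_DMABUF
 * is off) fall through to -ENOIOCTLCMD.
 */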
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) static long gntdev_ioctl(struct file *filp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) 			 unsigned int cmd, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) 	struct gntdev_priv *priv = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) void __user *ptr = (void __user *)arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) case IOCTL_GNTDEV_MAP_GRANT_REF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) return gntdev_ioctl_map_grant_ref(priv, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) case IOCTL_GNTDEV_UNMAP_GRANT_REF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) return gntdev_ioctl_unmap_grant_ref(priv, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) return gntdev_ioctl_notify(priv, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) case IOCTL_GNTDEV_GRANT_COPY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) return gntdev_ioctl_grant_copy(priv, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) #ifdef CONFIG_XEN_GNTDEV_DMABUF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) case IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) return gntdev_ioctl_dmabuf_exp_from_refs(priv, use_ptemod, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) case IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) return gntdev_ioctl_dmabuf_exp_wait_released(priv, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) case IOCTL_GNTDEV_DMABUF_IMP_TO_REFS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) return gntdev_ioctl_dmabuf_imp_to_refs(priv, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) case IOCTL_GNTDEV_DMABUF_IMP_RELEASE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) return gntdev_ioctl_dmabuf_imp_release(priv, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) return -ENOIOCTLCMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
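/*
 * mmap() a previously allocated range of grant pages.  vm_pgoff selects
 * the gntdev_grant_map (the index handed out by IOCTL_GNTDEV_MAP_GRANT_REF)
 * and the vma must fit within it; writable mappings have to be shared.
 * With use_ptemod the grant PTEs are installed through the hypervisor and
 * tracked with an MMU interval notifier; otherwise the already-mapped
 * pages are simply inserted with vm_map_pages_zero().
 */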
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) static int gntdev_mmap(struct file *filp, struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) 	struct gntdev_priv *priv = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) int index = vma->vm_pgoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) int count = vma_pages(vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) struct gntdev_grant_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) int err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) pr_debug("map %d+%d at %lx (pgoff %lx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) index, count, vma->vm_start, vma->vm_pgoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) mutex_lock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) map = gntdev_find_map_index(priv, index, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (!map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) goto unlock_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) if (use_ptemod && map->vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) goto unlock_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) refcount_inc(&map->users);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) vma->vm_ops = &gntdev_vmops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) if (use_ptemod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) vma->vm_flags |= VM_DONTCOPY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) vma->vm_private_data = map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (map->flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) if ((vma->vm_flags & VM_WRITE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) (map->flags & GNTMAP_readonly))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) goto out_unlock_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) map->flags = GNTMAP_host_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) if (!(vma->vm_flags & VM_WRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) map->flags |= GNTMAP_readonly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) if (use_ptemod) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) map->vma = vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) err = mmu_interval_notifier_insert_locked(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) &map->notifier, vma->vm_mm, vma->vm_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) vma->vm_end - vma->vm_start, &gntdev_mmu_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) map->vma = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) goto out_unlock_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) mutex_unlock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) if (use_ptemod) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) * gntdev takes the address of the PTE in find_grant_ptes() and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) * passes it to the hypervisor in gntdev_map_grant_pages(). The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) * purpose of the notifier is to prevent the hypervisor pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) * to the PTE from going stale.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) * Since this vma's mappings can't be touched without the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) * mmap_lock, and we are holding it now, there is no need for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) * the notifier_range locking pattern.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) mmu_interval_read_begin(&map->notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) map->pages_vm_start = vma->vm_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) err = apply_to_page_range(vma->vm_mm, vma->vm_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) vma->vm_end - vma->vm_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) find_grant_ptes, map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) pr_warn("find_grant_ptes() failure.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) goto out_put_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) err = gntdev_map_grant_pages(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) goto out_put_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) if (!use_ptemod) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) err = vm_map_pages_zero(vma, map->pages, map->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) goto out_put_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) #ifdef CONFIG_X86
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) * If the PTEs were not made special by the grant map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) * hypercall, do so here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) * This is racy since the mapping is already visible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) * to userspace but userspace should be well-behaved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) * enough to not touch it until the mmap() call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) * returns.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) if (!xen_feature(XENFEAT_gnttab_map_avail_bits)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) apply_to_page_range(vma->vm_mm, vma->vm_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) vma->vm_end - vma->vm_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) set_grant_ptes_as_special, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) unlock_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) mutex_unlock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) out_unlock_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) mutex_unlock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) out_put_map:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (use_ptemod) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) unmap_grant_pages(map, 0, map->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) if (map->vma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) mmu_interval_notifier_remove(&map->notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) map->vma = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) gntdev_put_map(priv, map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) static const struct file_operations gntdev_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) .open = gntdev_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) .release = gntdev_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) .mmap = gntdev_mmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) .unlocked_ioctl = gntdev_ioctl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) static struct miscdevice gntdev_miscdev = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) .minor = MISC_DYNAMIC_MINOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) .name = "xen/gntdev",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) .fops = &gntdev_fops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) /* ------------------------------------------------------------------ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
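/*
 * Module init: gntdev only makes sense in a Xen domain.  use_ptemod is
 * enabled on guests without an auto-translated physmap (i.e. PV), where
 * grant mappings must be wired up by rewriting PTEs via hypercall.
 */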
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) static int __init gntdev_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (!xen_domain())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) use_ptemod = !xen_feature(XENFEAT_auto_translated_physmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) err = misc_register(&gntdev_miscdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) if (err != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) pr_err("Could not register gntdev device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) static void __exit gntdev_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) misc_deregister(&gntdev_miscdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) module_init(gntdev_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) module_exit(gntdev_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) /* ------------------------------------------------------------------ */