^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Common functionality of grant device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright (c) 2006-2007, D G Murray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #ifndef _GNTDEV_COMMON_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #define _GNTDEV_COMMON_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/mman.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/mmu_notifier.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <xen/interface/event_channel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) struct gntdev_dmabuf_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21)
/* Per-open-file state of the gntdev character device. */
struct gntdev_priv {
	/* Maps with visible offsets in the file descriptor. */
	struct list_head maps;
	/*
	 * lock protects maps.
	 * (A former "freeable_maps" list no longer exists in this struct.)
	 */
	struct mutex lock;

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
	/* Device for which DMA memory is allocated. */
	struct device *dma_dev;
#endif

#ifdef CONFIG_XEN_GNTDEV_DMABUF
	/* Private state of the dma-buf exporter/importer for this file. */
	struct gntdev_dmabuf_priv *dmabuf_priv;
#endif
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
/*
 * Describes how (and whether) user space is notified when a grant
 * mapping is torn down.
 */
struct gntdev_unmap_notify {
	/* Notification method flags (UNMAP_NOTIFY_* — see gntdev uapi). */
	int flags;
	/* Address relative to the start of the gntdev_grant_map. */
	int addr;
	/* Event channel to signal on unmap. */
	evtchn_port_t event;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44)
/* One contiguous range of granted pages mapped into this file/VMA. */
struct gntdev_grant_map {
	/* Invalidation notifier covering the mapped VA range. */
	struct mmu_interval_notifier notifier;
	/* Link in gntdev_priv.maps. */
	struct list_head next;
	/* VMA this map is (or was) mapped into, if any. */
	struct vm_area_struct *vma;
	/* Page index of this map within the file (mmap offset). */
	int index;
	/* Number of pages / grant refs in this map. */
	int count;
	/* Mapping flags (GNTMAP_* — NOTE(review): confirm against users). */
	int flags;
	/* Reference count; map is freed when this drops to zero. */
	refcount_t users;
	/* Unmap notification settings for this map. */
	struct gntdev_unmap_notify notify;
	/* Per-page grant references supplied by user space. */
	struct ioctl_gntdev_grant_ref *grants;
	/* Per-page hypercall argument arrays for (un)mapping. */
	struct gnttab_map_grant_ref *map_ops;
	struct gnttab_unmap_grant_ref *unmap_ops;
	/* Kernel-address (un)map ops, used when a kernel mapping is needed. */
	struct gnttab_map_grant_ref *kmap_ops;
	struct gnttab_unmap_grant_ref *kunmap_ops;
	/* Backing pages for the mapping. */
	struct page **pages;
	/* User VA where pages was mapped (start of the VMA range). */
	unsigned long pages_vm_start;

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
	/* Device used to allocate the DMA-capable backing memory. */
	struct device *dma_dev;
	/* Flags used to create this DMA buffer: GNTDEV_DMA_FLAG_XXX. */
	int dma_flags;
	/*
	 * If dma_vaddr is not NULL then this mapping is backed by DMA
	 * capable memory.
	 */
	void *dma_vaddr;
	/* Bus address of the DMA buffer. */
	dma_addr_t dma_bus_addr;
	/* Needed to avoid allocation in gnttab_dma_free_pages(). */
	xen_pfn_t *frames;
#endif
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
/*
 * Allocate a new grant map for @count pages, with DMA behavior selected
 * by @dma_flags. Returns NULL on failure — TODO(review): confirm against
 * the definition in gntdev.c.
 */
struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
					  int dma_flags);

/* Insert @add into priv->maps (ownership passes to @priv's list). */
void gntdev_add_map(struct gntdev_priv *priv, struct gntdev_grant_map *add);

/* Drop one reference on @map; frees it when the last reference goes. */
void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map);

/* Check that @count is a valid/allowed number of pages for a map. */
bool gntdev_test_page_count(unsigned int count);

/* Perform the grant-table map hypercalls for @map; 0 on success. */
int gntdev_map_grant_pages(struct gntdev_grant_map *map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) #endif