/******************************************************************************
 * gntalloc.c
 *
 * Device for creating grant references (in user-space) that may be shared
 * with other domains.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * This driver exists to allow userspace programs in Linux to allocate kernel
 * memory that will later be shared with another domain. Without this device,
 * Linux userspace programs cannot create grant references.
 *
 * How this stuff works:
 *   X -> granting a page to Y
 *   Y -> mapping the grant from X
 *
 *   1. X uses the gntalloc device to allocate a page of kernel memory, P.
 *   2. X creates an entry in the grant table that says domid(Y) can access P.
 *      This is done without a hypercall unless the grant table needs expansion.
 *   3. X gives the grant reference identifier, GREF, to Y.
 *   4. Y maps the page, either directly into kernel memory for use in a backend
 *      driver, or via the gntdev device to map into the address space of an
 *      application running in Y. This is the first point at which Xen does any
 *      tracking of the page.
 *   5. A program in X mmap()s a segment of the gntalloc device that corresponds
 *      to the shared page, and can now communicate with Y over the shared page.
 *
 *
 * NOTE TO USERSPACE LIBRARIES:
 *   The grant allocation and mmap()ing are, naturally, two separate operations.
 *   You set up the sharing by calling the create ioctl() and then the mmap().
 *   Teardown requires munmap() and either close() or ioctl().
 *
 * WARNING: Since Xen does not allow a guest to forcibly end the use of a grant
 * reference, this device can be used to consume kernel memory by leaving grant
 * references mapped by another domain when an application exits. Therefore,
 * there is a global limit on the number of pages that can be allocated. When
 * all references to the page are unmapped, it will be freed during the next
 * grant operation.
 */
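
/*
 * For illustration only, a minimal single-page flow from a userspace program
 * in X might look like the sketch below. This is not part of the driver;
 * error handling is omitted, remote_domid and 4096 (the page size) are
 * placeholders, and it assumes the <xen/gntalloc.h> uapi definitions and a
 * /dev/xen/gntalloc device node:
 *
 *	struct ioctl_gntalloc_alloc_gref op = {
 *		.domid = remote_domid,			// domain Y
 *		.flags = GNTALLOC_FLAG_WRITABLE,
 *		.count = 1,
 *	};
 *	int fd = open("/dev/xen/gntalloc", O_RDWR);
 *	ioctl(fd, IOCTL_GNTALLOC_ALLOC_GREF, &op);
 *	// op.gref_ids[0] now holds GREF; pass it to Y out of band.
 *	void *shared = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED, fd, op.index);
 *	// ... communicate with Y through the shared page ...
 *	munmap(shared, 4096);
 *	struct ioctl_gntalloc_dealloc_gref dop = {
 *		.index = op.index,
 *		.count = 1,
 *	};
 *	ioctl(fd, IOCTL_GNTALLOC_DEALLOC_GREF, &dop);
 *	close(fd);
 */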

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/highmem.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/grant_table.h>
#include <xen/gntalloc.h>
#include <xen/events.h>

static int limit = 1024;
module_param(limit, int, 0644);
MODULE_PARM_DESC(limit, "Maximum number of grants that may be allocated by "
		"the gntalloc device");
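
/*
 * The limit can be raised at load time, e.g. "modprobe xen-gntalloc limit=4096"
 * (assuming the module is built as xen-gntalloc), or, since the parameter is
 * 0644, adjusted at runtime by root via
 * /sys/module/<module name>/parameters/limit.
 */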

static LIST_HEAD(gref_list);
static DEFINE_MUTEX(gref_mutex);
static int gref_size;

struct notify_info {
	uint16_t pgoff:12;	/* Bits 0-11: Offset of the byte to clear */
	uint16_t flags:2;	/* Bits 12-13: Unmap notification flags */
	int event;		/* Port (event channel) to notify */
};
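
/*
 * For illustration, userspace can request that a status byte in the shared
 * page be cleared and/or an event channel be signalled when the grant is
 * released (see __del_gref()). A sketch, reusing fd and op.index from the
 * example above; status_byte_offset is a placeholder for the byte within the
 * page to clear, and the flag names are assumed from <xen/gntalloc.h>:
 *
 *	struct ioctl_gntalloc_unmap_notify nop = {
 *		.index = op.index + status_byte_offset,
 *		.action = UNMAP_NOTIFY_CLEAR_BYTE | UNMAP_NOTIFY_SEND_EVENT,
 *		.event_channel_port = port,
 *	};
 *	ioctl(fd, IOCTL_GNTALLOC_SET_UNMAP_NOTIFY, &nop);
 */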

/* Metadata on a grant reference. */
struct gntalloc_gref {
	struct list_head next_gref;	/* list entry gref_list */
	struct list_head next_file;	/* list entry file->list, if open */
	struct page *page;		/* The shared page */
	uint64_t file_index;		/* File offset for mmap() */
	unsigned int users;		/* Use count - when zero, waiting on Xen */
	grant_ref_t gref_id;		/* The grant reference number */
	struct notify_info notify;	/* Unmap notification */
};

struct gntalloc_file_private_data {
	struct list_head list;
	uint64_t index;
};

struct gntalloc_vma_private_data {
	struct gntalloc_gref *gref;
	int users;
	int count;
};

static void __del_gref(struct gntalloc_gref *gref);

static void do_cleanup(void)
{
	struct gntalloc_gref *gref, *n;
	list_for_each_entry_safe(gref, n, &gref_list, next_gref) {
		if (!gref->users)
			__del_gref(gref);
	}
}

static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
	uint32_t *gref_ids, struct gntalloc_file_private_data *priv)
{
	int i, rc, readonly;
	LIST_HEAD(queue_gref);
	LIST_HEAD(queue_file);
	struct gntalloc_gref *gref, *next;

	readonly = !(op->flags & GNTALLOC_FLAG_WRITABLE);
	for (i = 0; i < op->count; i++) {
		gref = kzalloc(sizeof(*gref), GFP_KERNEL);
		if (!gref) {
			rc = -ENOMEM;
			goto undo;
		}
		list_add_tail(&gref->next_gref, &queue_gref);
		list_add_tail(&gref->next_file, &queue_file);
		gref->users = 1;
		gref->file_index = op->index + i * PAGE_SIZE;
		gref->page = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!gref->page) {
			rc = -ENOMEM;
			goto undo;
		}

		/* Grant foreign access to the page. */
		rc = gnttab_grant_foreign_access(op->domid,
						 xen_page_to_gfn(gref->page),
						 readonly);
		if (rc < 0)
			goto undo;
		gref_ids[i] = gref->gref_id = rc;
	}

	/* Add to gref lists. */
	mutex_lock(&gref_mutex);
	list_splice_tail(&queue_gref, &gref_list);
	list_splice_tail(&queue_file, &priv->list);
	mutex_unlock(&gref_mutex);

	return 0;

undo:
	mutex_lock(&gref_mutex);
	gref_size -= (op->count - i);

	list_for_each_entry_safe(gref, next, &queue_file, next_file) {
		list_del(&gref->next_file);
		__del_gref(gref);
	}

	mutex_unlock(&gref_mutex);
	return rc;
}

static void __del_gref(struct gntalloc_gref *gref)
{
	unsigned long addr;

	if (gref->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
		uint8_t *tmp = kmap(gref->page);
		tmp[gref->notify.pgoff] = 0;
		kunmap(gref->page);
	}
	if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
		notify_remote_via_evtchn(gref->notify.event);
		evtchn_put(gref->notify.event);
	}

	gref->notify.flags = 0;

	if (gref->gref_id) {
		if (gref->page) {
			addr = (unsigned long)page_to_virt(gref->page);
			gnttab_end_foreign_access(gref->gref_id, 0, addr);
		} else
			gnttab_free_grant_reference(gref->gref_id);
	}

	gref_size--;
	list_del(&gref->next_gref);

	kfree(gref);
}

/* finds contiguous grant references in a file, returns the first */
static struct gntalloc_gref *find_grefs(struct gntalloc_file_private_data *priv,
		uint64_t index, uint32_t count)
{
	struct gntalloc_gref *rv = NULL, *gref;
	list_for_each_entry(gref, &priv->list, next_file) {
		if (gref->file_index == index && !rv)
			rv = gref;
		if (rv) {
			if (gref->file_index != index)
				return NULL;
			index += PAGE_SIZE;
			count--;
			if (count == 0)
				return rv;
		}
	}
	return NULL;
}

/*
 * -------------------------------------
 *  File operations.
 * -------------------------------------
 */
static int gntalloc_open(struct inode *inode, struct file *filp)
{
	struct gntalloc_file_private_data *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto out_nomem;
	INIT_LIST_HEAD(&priv->list);

	filp->private_data = priv;

	pr_debug("%s: priv %p\n", __func__, priv);

	return 0;

out_nomem:
	return -ENOMEM;
}

static int gntalloc_release(struct inode *inode, struct file *filp)
{
	struct gntalloc_file_private_data *priv = filp->private_data;
	struct gntalloc_gref *gref;

	pr_debug("%s: priv %p\n", __func__, priv);

	mutex_lock(&gref_mutex);
	while (!list_empty(&priv->list)) {
		gref = list_entry(priv->list.next,
			struct gntalloc_gref, next_file);
		list_del(&gref->next_file);
		gref->users--;
		if (gref->users == 0)
			__del_gref(gref);
	}
	kfree(priv);
	mutex_unlock(&gref_mutex);

	return 0;
}

static long gntalloc_ioctl_alloc(struct gntalloc_file_private_data *priv,
		struct ioctl_gntalloc_alloc_gref __user *arg)
{
	int rc = 0;
	struct ioctl_gntalloc_alloc_gref op;
	uint32_t *gref_ids;

	pr_debug("%s: priv %p\n", __func__, priv);

	if (copy_from_user(&op, arg, sizeof(op))) {
		rc = -EFAULT;
		goto out;
	}

	gref_ids = kcalloc(op.count, sizeof(gref_ids[0]), GFP_KERNEL);
	if (!gref_ids) {
		rc = -ENOMEM;
		goto out;
	}

	mutex_lock(&gref_mutex);
	/* Clean up pages that were at zero (local) users but were still mapped
	 * by remote domains. Since those pages count towards the limit that we
	 * are about to enforce, removing them here is a good idea.
	 */
	do_cleanup();
	if (gref_size + op.count > limit) {
		mutex_unlock(&gref_mutex);
		rc = -ENOSPC;
		goto out_free;
	}
	gref_size += op.count;
	op.index = priv->index;
	priv->index += op.count * PAGE_SIZE;
	mutex_unlock(&gref_mutex);

	rc = add_grefs(&op, gref_ids, priv);
	if (rc < 0)
		goto out_free;

	/* Once we finish add_grefs, it is unsafe to touch the new reference,
	 * since it is possible for a concurrent ioctl to remove it (by guessing
	 * its index). If the userspace application doesn't provide valid memory
	 * to write the IDs to, then it will need to close the file in order to
	 * release - which it will do by segfaulting when it tries to access the
	 * IDs to close them.
	 */
	if (copy_to_user(arg, &op, sizeof(op))) {
		rc = -EFAULT;
		goto out_free;
	}
	if (copy_to_user(arg->gref_ids, gref_ids,
			sizeof(gref_ids[0]) * op.count)) {
		rc = -EFAULT;
		goto out_free;
	}

out_free:
	kfree(gref_ids);
out:
	return rc;
}

static long gntalloc_ioctl_dealloc(struct gntalloc_file_private_data *priv,
		void __user *arg)
{
	int i, rc = 0;
	struct ioctl_gntalloc_dealloc_gref op;
	struct gntalloc_gref *gref, *n;

	pr_debug("%s: priv %p\n", __func__, priv);

	if (copy_from_user(&op, arg, sizeof(op))) {
		rc = -EFAULT;
		goto dealloc_grant_out;
	}

	mutex_lock(&gref_mutex);
	gref = find_grefs(priv, op.index, op.count);
	if (gref) {
		/* Remove from the file list only, and decrease reference count.
		 * The later call to do_cleanup() will remove from gref_list and
		 * free the memory if the pages aren't mapped anywhere.
		 */
		for (i = 0; i < op.count; i++) {
			n = list_entry(gref->next_file.next,
				struct gntalloc_gref, next_file);
			list_del(&gref->next_file);
			gref->users--;
			gref = n;
		}
	} else {
		rc = -EINVAL;
	}

	do_cleanup();

	mutex_unlock(&gref_mutex);
dealloc_grant_out:
	return rc;
}

static long gntalloc_ioctl_unmap_notify(struct gntalloc_file_private_data *priv,
		void __user *arg)
{
	struct ioctl_gntalloc_unmap_notify op;
	struct gntalloc_gref *gref;
	uint64_t index;
	int pgoff;
	int rc;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	index = op.index & ~(PAGE_SIZE - 1);
	pgoff = op.index & (PAGE_SIZE - 1);

	mutex_lock(&gref_mutex);

	gref = find_grefs(priv, index, 1);
	if (!gref) {
		rc = -ENOENT;
		goto unlock_out;
	}

	if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT)) {
		rc = -EINVAL;
		goto unlock_out;
	}

	/* We need to grab a reference to the event channel we are going to use
	 * to send the notify before releasing the reference we may already have
	 * (if someone has called this ioctl twice). This is required so that
	 * it is possible to change the clear_byte part of the notification
	 * without disturbing the event channel part, which may now be the last
	 * reference to that event channel.
	 */
	if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
		if (evtchn_get(op.event_channel_port)) {
			rc = -EINVAL;
			goto unlock_out;
		}
	}

	if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT)
		evtchn_put(gref->notify.event);

	gref->notify.flags = op.action;
	gref->notify.pgoff = pgoff;
	gref->notify.event = op.event_channel_port;
	rc = 0;

unlock_out:
	mutex_unlock(&gref_mutex);
	return rc;
}

static long gntalloc_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	struct gntalloc_file_private_data *priv = filp->private_data;

	switch (cmd) {
	case IOCTL_GNTALLOC_ALLOC_GREF:
		return gntalloc_ioctl_alloc(priv, (void __user *)arg);

	case IOCTL_GNTALLOC_DEALLOC_GREF:
		return gntalloc_ioctl_dealloc(priv, (void __user *)arg);

	case IOCTL_GNTALLOC_SET_UNMAP_NOTIFY:
		return gntalloc_ioctl_unmap_notify(priv, (void __user *)arg);

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}

static void gntalloc_vma_open(struct vm_area_struct *vma)
{
	struct gntalloc_vma_private_data *priv = vma->vm_private_data;

	if (!priv)
		return;

	mutex_lock(&gref_mutex);
	priv->users++;
	mutex_unlock(&gref_mutex);
}

static void gntalloc_vma_close(struct vm_area_struct *vma)
{
	struct gntalloc_vma_private_data *priv = vma->vm_private_data;
	struct gntalloc_gref *gref, *next;
	int i;

	if (!priv)
		return;

	mutex_lock(&gref_mutex);
	priv->users--;
	if (priv->users == 0) {
		gref = priv->gref;
		for (i = 0; i < priv->count; i++) {
			gref->users--;
			next = list_entry(gref->next_gref.next,
					  struct gntalloc_gref, next_gref);
			if (gref->users == 0)
				__del_gref(gref);
			gref = next;
		}
		kfree(priv);
	}
	mutex_unlock(&gref_mutex);
}

static const struct vm_operations_struct gntalloc_vmops = {
	.open = gntalloc_vma_open,
	.close = gntalloc_vma_close,
};

static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct gntalloc_file_private_data *priv = filp->private_data;
	struct gntalloc_vma_private_data *vm_priv;
	struct gntalloc_gref *gref;
	int count = vma_pages(vma);
	int rv, i;

	if (!(vma->vm_flags & VM_SHARED)) {
		pr_err("%s: Mapping must be shared\n", __func__);
		return -EINVAL;
	}

	vm_priv = kmalloc(sizeof(*vm_priv), GFP_KERNEL);
	if (!vm_priv)
		return -ENOMEM;

	mutex_lock(&gref_mutex);

	pr_debug("%s: priv %p,%p, page %lu+%d\n", __func__,
		 priv, vm_priv, vma->vm_pgoff, count);

	gref = find_grefs(priv, vma->vm_pgoff << PAGE_SHIFT, count);
	if (gref == NULL) {
		rv = -ENOENT;
		pr_debug("%s: Could not find grant reference",
			 __func__);
		kfree(vm_priv);
		goto out_unlock;
	}

	vm_priv->gref = gref;
	vm_priv->users = 1;
	vm_priv->count = count;

	vma->vm_private_data = vm_priv;

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	vma->vm_ops = &gntalloc_vmops;

	for (i = 0; i < count; i++) {
		gref->users++;
		rv = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
				    gref->page);
		if (rv)
			goto out_unlock;

		gref = list_entry(gref->next_file.next,
				  struct gntalloc_gref, next_file);
	}
	rv = 0;

out_unlock:
	mutex_unlock(&gref_mutex);
	return rv;
}

static const struct file_operations gntalloc_fops = {
	.owner = THIS_MODULE,
	.open = gntalloc_open,
	.release = gntalloc_release,
	.unlocked_ioctl = gntalloc_ioctl,
	.mmap = gntalloc_mmap
};

/*
 * -------------------------------------
 * Module creation/destruction.
 * -------------------------------------
 */
static struct miscdevice gntalloc_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name  = "xen/gntalloc",
	.fops  = &gntalloc_fops,
};

static int __init gntalloc_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&gntalloc_miscdev);
	if (err != 0) {
		pr_err("Could not register misc gntalloc device\n");
		return err;
	}

	pr_debug("Created grant allocation device at %d,%d\n",
		 MISC_MAJOR, gntalloc_miscdev.minor);

	return 0;
}

static void __exit gntalloc_exit(void)
{
	misc_deregister(&gntalloc_miscdev);
}

module_init(gntalloc_init);
module_exit(gntalloc_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Carter Weatherly <carter.weatherly@jhuapl.edu>, "
		"Daniel De Graaf <dgdegra@tycho.nsa.gov>");
MODULE_DESCRIPTION("User-space grant reference allocator driver");