^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0 OR MIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Xen frontend/backend page directory based shared buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * helper module.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Copyright (C) 2018 EPAM Systems Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <asm/xen/hypervisor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <xen/balloon.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <xen/xen.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <xen/xenbus.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <xen/interface/io/ring.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <xen/xen-front-pgdir-shbuf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23)
#ifndef GRANT_INVALID_REF
/*
 * FIXME: usage of grant reference 0 as invalid grant reference:
 * grant reference 0 is valid, but never exposed to a PV driver,
 * because of the fact it is already in use/reserved by the PV console.
 *
 * NOTE(review): this sentinel is also what terminates the page
 * directory chain (gref_dir_next_page) on the wire, so both ends of
 * the protocol must agree on it.
 */
#define GRANT_INVALID_REF	0
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) * This structure represents the structure of a shared page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) * that contains grant references to the pages of the shared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) * buffer. This structure is common to many Xen para-virtualized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) * protocols at include/xen/interface/io/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) struct xen_page_directory {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) grant_ref_t gref_dir_next_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) grant_ref_t gref[1]; /* Variable length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)
/**
 * Shared buffer ops which are differently implemented
 * depending on the allocation mode, e.g. if the buffer
 * is allocated by the corresponding backend or frontend.
 * Any of the operations may be left NULL, in which case the
 * generic code treats it as a no-op (see the xen_front_pgdir_shbuf_map
 * and xen_front_pgdir_shbuf_unmap wrappers).
 */
struct xen_front_pgdir_shbuf_ops {
	/*
	 * Calculate number of grefs required to handle this buffer,
	 * e.g. if grefs are required for page directory only or the buffer
	 * pages as well.
	 */
	void (*calc_num_grefs)(struct xen_front_pgdir_shbuf *buf);

	/* Fill page directory according to para-virtual display protocol. */
	void (*fill_page_dir)(struct xen_front_pgdir_shbuf *buf);

	/*
	 * Claim grant references for the pages of the buffer.
	 * gref_idx is the index in buf->grefs at which the buffer's
	 * grefs start (directory grefs come first).
	 */
	int (*grant_refs_for_buffer)(struct xen_front_pgdir_shbuf *buf,
				     grant_ref_t *priv_gref_head, int gref_idx);

	/* Map grant references of the buffer (backend-allocated mode). */
	int (*map)(struct xen_front_pgdir_shbuf *buf);

	/* Unmap grant references of the buffer (backend-allocated mode). */
	int (*unmap)(struct xen_front_pgdir_shbuf *buf);
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) * Get granted reference to the very first page of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) * page directory. Usually this is passed to the backend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) * so it can find/fill the grant references to the buffer's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) * pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) * \param buf shared buffer which page directory is of interest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) * \return granted reference to the very first page of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) * page directory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) grant_ref_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) xen_front_pgdir_shbuf_get_dir_start(struct xen_front_pgdir_shbuf *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) if (!buf->grefs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) return GRANT_INVALID_REF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) return buf->grefs[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_get_dir_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) * Map granted references of the shared buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) * Depending on the shared buffer mode of allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) * (be_alloc flag) this can either do nothing (for buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) * shared by the frontend itself) or map the provided granted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) * references onto the backing storage (buf->pages).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) * \param buf shared buffer which grants to be maped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) * \return zero on success or a negative number on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) int xen_front_pgdir_shbuf_map(struct xen_front_pgdir_shbuf *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) if (buf->ops && buf->ops->map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) return buf->ops->map(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) /* No need to map own grant references. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) * Unmap granted references of the shared buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) * Depending on the shared buffer mode of allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) * (be_alloc flag) this can either do nothing (for buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) * shared by the frontend itself) or unmap the provided granted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) * references.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) * \param buf shared buffer which grants to be unmaped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) * \return zero on success or a negative number on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) int xen_front_pgdir_shbuf_unmap(struct xen_front_pgdir_shbuf *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) if (buf->ops && buf->ops->unmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) return buf->ops->unmap(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) /* No need to unmap own grant references. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_unmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) * Free all the resources of the shared buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) * \param buf shared buffer which resources to be freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) void xen_front_pgdir_shbuf_free(struct xen_front_pgdir_shbuf *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) if (buf->grefs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) for (i = 0; i < buf->num_grefs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) if (buf->grefs[i] != GRANT_INVALID_REF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) gnttab_end_foreign_access(buf->grefs[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 0, 0UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) kfree(buf->grefs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) kfree(buf->directory);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153)
/*
 * Number of grefs a page can hold with respect to the
 * struct xen_page_directory header: the whole page minus the
 * header, divided by the size of one grant reference entry.
 */
#define XEN_NUM_GREFS_PER_PAGE ((PAGE_SIZE - \
				 offsetof(struct xen_page_directory, \
					  gref)) / sizeof(grant_ref_t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161)
/**
 * Get the number of pages the page directory consumes itself:
 * enough directory pages to hold one gref per buffer page,
 * rounded up.
 *
 * \param buf shared buffer.
 * \return number of pages needed for the page directory.
 */
static int get_num_pages_dir(struct xen_front_pgdir_shbuf *buf)
{
	return DIV_ROUND_UP(buf->num_pages, XEN_NUM_GREFS_PER_PAGE);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)
/**
 * Calculate the number of grant references needed to share the buffer
 * and its pages when backend allocates the buffer.
 *
 * In this mode the buffer's data pages are granted by the backend,
 * so the frontend only grants the page directory pages.
 *
 * \param buf shared buffer.
 */
static void backend_calc_num_grefs(struct xen_front_pgdir_shbuf *buf)
{
	/* Only for pages the page directory consumes itself. */
	buf->num_grefs = get_num_pages_dir(buf);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) * Calculate the number of grant references needed to share the buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) * and its pages when frontend allocates the buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) * \param buf shared buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) static void guest_calc_num_grefs(struct xen_front_pgdir_shbuf *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) * Number of pages the page directory consumes itself
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) * plus grefs for the buffer pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) buf->num_grefs = get_num_pages_dir(buf) + buf->num_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)
/* Kernel virtual address of a buffer page, as needed by gnttab map/unmap. */
#define xen_page_to_vaddr(page) \
	((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)
/**
 * Unmap the buffer previously mapped with grant references
 * provided by the backend.
 *
 * Builds one unmap op per buffer page from the handles recorded by
 * backend_map(), performs the batch unmap and releases the handle
 * array. Safe to call when nothing was mapped (returns 0).
 *
 * \param buf shared buffer.
 * \return zero on success or a negative number on failure.
 */
static int backend_unmap(struct xen_front_pgdir_shbuf *buf)
{
	struct gnttab_unmap_grant_ref *unmap_ops;
	int i, ret;

	/* Nothing to do if the buffer was never (fully) mapped. */
	if (!buf->pages || !buf->backend_map_handles || !buf->grefs)
		return 0;

	unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops),
			    GFP_KERNEL);
	if (!unmap_ops)
		return -ENOMEM;

	for (i = 0; i < buf->num_pages; i++) {
		phys_addr_t addr;

		/* Unmap at the same kernel vaddr the page was mapped to. */
		addr = xen_page_to_vaddr(buf->pages[i]);
		gnttab_set_unmap_op(&unmap_ops[i], addr, GNTMAP_host_map,
				    buf->backend_map_handles[i]);
	}

	ret = gnttab_unmap_refs(unmap_ops, NULL, buf->pages,
				buf->num_pages);

	/* Report per-page failures; ret below covers the batch as a whole. */
	for (i = 0; i < buf->num_pages; i++) {
		if (unlikely(unmap_ops[i].status != GNTST_okay))
			dev_err(&buf->xb_dev->dev,
				"Failed to unmap page %d: %d\n",
				i, unmap_ops[i].status);
	}

	if (ret)
		dev_err(&buf->xb_dev->dev,
			"Failed to unmap grant references, ret %d", ret);

	/*
	 * Drop the handles unconditionally so a repeated call
	 * becomes a no-op (see the early-return check above).
	 */
	kfree(unmap_ops);
	kfree(buf->backend_map_handles);
	buf->backend_map_handles = NULL;
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249)
/**
 * Map the buffer with grant references provided by the backend.
 *
 * Walks the page directory (filled in by the backend), builds one
 * map op per buffer page and performs the batch map onto buf->pages.
 * Map handles are recorded for backend_unmap() even on failure, so a
 * partially mapped buffer can be torn down.
 *
 * \param buf shared buffer.
 * \return zero on success or a negative number on failure.
 */
static int backend_map(struct xen_front_pgdir_shbuf *buf)
{
	struct gnttab_map_grant_ref *map_ops = NULL;
	unsigned char *ptr;
	int ret, cur_gref, cur_dir_page, cur_page, grefs_left;

	map_ops = kcalloc(buf->num_pages, sizeof(*map_ops), GFP_KERNEL);
	if (!map_ops)
		return -ENOMEM;

	buf->backend_map_handles = kcalloc(buf->num_pages,
					   sizeof(*buf->backend_map_handles),
					   GFP_KERNEL);
	if (!buf->backend_map_handles) {
		kfree(map_ops);
		return -ENOMEM;
	}

	/*
	 * Read page directory to get grefs from the backend: for external
	 * buffer we only allocate buf->grefs for the page directory,
	 * so buf->num_grefs has number of pages in the page directory itself.
	 */
	ptr = buf->directory;
	grefs_left = buf->num_pages;
	cur_page = 0;
	for (cur_dir_page = 0; cur_dir_page < buf->num_grefs; cur_dir_page++) {
		struct xen_page_directory *page_dir =
			(struct xen_page_directory *)ptr;
		/* The last directory page may be only partially filled. */
		int to_copy = XEN_NUM_GREFS_PER_PAGE;

		if (to_copy > grefs_left)
			to_copy = grefs_left;

		for (cur_gref = 0; cur_gref < to_copy; cur_gref++) {
			phys_addr_t addr;

			addr = xen_page_to_vaddr(buf->pages[cur_page]);
			gnttab_set_map_op(&map_ops[cur_page], addr,
					  GNTMAP_host_map,
					  page_dir->gref[cur_gref],
					  buf->xb_dev->otherend_id);
			cur_page++;
		}

		grefs_left -= to_copy;
		ptr += PAGE_SIZE;
	}
	ret = gnttab_map_refs(map_ops, NULL, buf->pages, buf->num_pages);

	/* Save handles even if error, so we can unmap. */
	for (cur_page = 0; cur_page < buf->num_pages; cur_page++) {
		buf->backend_map_handles[cur_page] = map_ops[cur_page].handle;
		if (unlikely(map_ops[cur_page].status != GNTST_okay))
			dev_err(&buf->xb_dev->dev,
				"Failed to map page %d: %d\n",
				cur_page, map_ops[cur_page].status);
	}

	if (ret) {
		dev_err(&buf->xb_dev->dev,
			"Failed to map grant references, ret %d", ret);
		/* Tear down whatever did get mapped. */
		backend_unmap(buf);
	}

	kfree(map_ops);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) * Fill page directory with grant references to the pages of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) * page directory itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) * The grant references to the buffer pages are provided by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) * backend in this case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) * \param buf shared buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) static void backend_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) struct xen_page_directory *page_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) unsigned char *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) int i, num_pages_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) ptr = buf->directory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) num_pages_dir = get_num_pages_dir(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) /* Fill only grefs for the page directory itself. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) for (i = 0; i < num_pages_dir - 1; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) page_dir = (struct xen_page_directory *)ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) page_dir->gref_dir_next_page = buf->grefs[i + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) ptr += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) /* Last page must say there is no more pages. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) page_dir = (struct xen_page_directory *)ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) page_dir->gref_dir_next_page = GRANT_INVALID_REF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) * Fill page directory with grant references to the pages of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) * page directory and the buffer we share with the backend.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) * \param buf shared buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) static void guest_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) unsigned char *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) int cur_gref, grefs_left, to_copy, i, num_pages_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) ptr = buf->directory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) num_pages_dir = get_num_pages_dir(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) * While copying, skip grefs at start, they are for pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) * granted for the page directory itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) cur_gref = num_pages_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) grefs_left = buf->num_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) for (i = 0; i < num_pages_dir; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) struct xen_page_directory *page_dir =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) (struct xen_page_directory *)ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) if (grefs_left <= XEN_NUM_GREFS_PER_PAGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) to_copy = grefs_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) page_dir->gref_dir_next_page = GRANT_INVALID_REF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) to_copy = XEN_NUM_GREFS_PER_PAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) page_dir->gref_dir_next_page = buf->grefs[i + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) memcpy(&page_dir->gref, &buf->grefs[cur_gref],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) to_copy * sizeof(grant_ref_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) ptr += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) grefs_left -= to_copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) cur_gref += to_copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) * Grant references to the frontend's buffer pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) * These will be shared with the backend, so it can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) * access the buffer's data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) * \param buf shared buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) * \return zero on success or a negative number on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) static int guest_grant_refs_for_buffer(struct xen_front_pgdir_shbuf *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) grant_ref_t *priv_gref_head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) int gref_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) int i, cur_ref, otherend_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) otherend_id = buf->xb_dev->otherend_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) for (i = 0; i < buf->num_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) cur_ref = gnttab_claim_grant_reference(priv_gref_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) if (cur_ref < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) return cur_ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) gnttab_grant_foreign_access_ref(cur_ref, otherend_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) xen_page_to_gfn(buf->pages[i]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) buf->grefs[gref_idx++] = cur_ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) * Grant all the references needed to share the buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) * Grant references to the page directory pages and, if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) * needed, also to the pages of the shared buffer data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) * \param buf shared buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) * \return zero on success or a negative number on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) static int grant_references(struct xen_front_pgdir_shbuf *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) grant_ref_t priv_gref_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) int ret, i, j, cur_ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) int otherend_id, num_pages_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) ret = gnttab_alloc_grant_references(buf->num_grefs, &priv_gref_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) dev_err(&buf->xb_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) "Cannot allocate grant references\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) otherend_id = buf->xb_dev->otherend_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) j = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) num_pages_dir = get_num_pages_dir(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) for (i = 0; i < num_pages_dir; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) unsigned long frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) if (cur_ref < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) return cur_ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) frame = xen_page_to_gfn(virt_to_page(buf->directory +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) PAGE_SIZE * i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) gnttab_grant_foreign_access_ref(cur_ref, otherend_id, frame, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) buf->grefs[j++] = cur_ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) if (buf->ops->grant_refs_for_buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) ret = buf->ops->grant_refs_for_buffer(buf, &priv_gref_head, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) gnttab_free_grant_references(priv_gref_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) * Allocate all required structures to mange shared buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) * \param buf shared buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) * \return zero on success or a negative number on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) static int alloc_storage(struct xen_front_pgdir_shbuf *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) buf->grefs = kcalloc(buf->num_grefs, sizeof(*buf->grefs), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) if (!buf->grefs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) buf->directory = kcalloc(get_num_pages_dir(buf), PAGE_SIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) if (!buf->directory)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) * For backend allocated buffers we don't need grant_refs_for_buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) * as those grant references are allocated at backend side.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) static const struct xen_front_pgdir_shbuf_ops backend_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) .calc_num_grefs = backend_calc_num_grefs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) .fill_page_dir = backend_fill_page_dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) .map = backend_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) .unmap = backend_unmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) * For locally granted references we do not need to map/unmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) * the references.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) static const struct xen_front_pgdir_shbuf_ops local_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) .calc_num_grefs = guest_calc_num_grefs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) .fill_page_dir = guest_fill_page_dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) .grant_refs_for_buffer = guest_grant_refs_for_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) * Allocate a new instance of a shared buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) * \param cfg configuration to be used while allocating a new shared buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) * \return zero on success or a negative number on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) int xen_front_pgdir_shbuf_alloc(struct xen_front_pgdir_shbuf_cfg *cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) struct xen_front_pgdir_shbuf *buf = cfg->pgdir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) if (cfg->be_alloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) buf->ops = &backend_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) buf->ops = &local_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) buf->xb_dev = cfg->xb_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) buf->num_pages = cfg->num_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) buf->pages = cfg->pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) buf->ops->calc_num_grefs(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) ret = alloc_storage(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) ret = grant_references(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) buf->ops->fill_page_dir(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) xen_front_pgdir_shbuf_free(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) MODULE_DESCRIPTION("Xen frontend/backend page directory based "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) "shared buffer handling");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) MODULE_AUTHOR("Oleksandr Andrushchenko");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) MODULE_LICENSE("GPL");