/******************************************************************************
 * grant_table.h
 *
 * Two sets of functionality:
 * 1. Granting foreign access to our memory reservation.
 * 2. Accessing others' memory reservations via grant references.
 * (i.e., mechanisms for both sender and recipient of grant references)
 *
 * Copyright (c) 2004-2005, K A Fraser
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __ASM_GNTTAB_H__
#define __ASM_GNTTAB_H__

#include <asm/page.h>

#include <xen/interface/xen.h>
#include <xen/interface/grant_table.h>

#include <asm/xen/hypervisor.h>

#include <xen/features.h>
#include <xen/page.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/kernel.h>

#define GNTTAB_RESERVED_XENSTORE 1

/* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
#define NR_GRANT_FRAMES 4

struct gnttab_free_callback {
	struct gnttab_free_callback *next;
	void (*fn)(void *);
	void *arg;
	u16 count;
};

struct gntab_unmap_queue_data;

typedef void (*gnttab_unmap_refs_done)(int result, struct gntab_unmap_queue_data *data);

struct gntab_unmap_queue_data
{
	struct delayed_work gnttab_work;
	void *data;
	gnttab_unmap_refs_done done;
	struct gnttab_unmap_grant_ref *unmap_ops;
	struct gnttab_unmap_grant_ref *kunmap_ops;
	struct page **pages;
	unsigned int count;
	unsigned int age;
};

int gnttab_init(void);
int gnttab_suspend(void);
int gnttab_resume(void);

int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
				int readonly);

/*
 * End access through the given grant reference, iff the grant entry is no
 * longer in use. Return 1 if the grant entry was freed, 0 if it is still in
 * use.
 */
int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);

/*
 * Eventually end access through the given grant reference, and once that
 * access has been ended, free the given page too. Access will be ended
 * immediately iff the grant entry is not in use, otherwise it will happen
 * some time later. page may be 0, in which case no freeing will occur.
 * Note that the granted page might still be accessed (read or write) by the
 * other side after gnttab_end_foreign_access() returns, so even if page was
 * specified as 0 it is not allowed to just reuse the page for other
 * purposes immediately. gnttab_end_foreign_access() will take an additional
 * reference to the granted page in this case, which is dropped only after
 * the grant is no longer in use.
 * This requires that multi-page allocations for areas subject to
 * gnttab_end_foreign_access() are done via alloc_pages_exact() (and freed
 * via free_pages_exact()) in order to avoid high order pages.
 */
void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
			       unsigned long page);
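
/*
 * Example (illustrative sketch, not part of this API): grant a freshly
 * allocated page to another domain and later hand it back to the grant
 * table code for deferred freeing. "otherend_id" is a hypothetical domid
 * obtained elsewhere (e.g. from xenbus):
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *	int gref;
 *
 *	gref = gnttab_grant_foreign_access(otherend_id,
 *					   xen_page_to_gfn(page), 0);
 *	if (gref < 0) {
 *		__free_page(page);
 *		return gref;
 *	}
 *	...
 *	gnttab_end_foreign_access(gref, 0, (unsigned long)page_address(page));
 *
 * After the last call the page must no longer be touched by the caller; it
 * is freed once the other side has dropped its mapping.
 */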

/*
 * End access through the given grant reference, iff the grant entry is
 * no longer in use. If ending foreign access succeeds, the grant
 * reference is also deallocated.
 * Return 1 if the grant entry was freed, 0 if it is still in use.
 */
int gnttab_try_end_foreign_access(grant_ref_t ref);

int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);

unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);

/*
 * Operations on reserved batches of grant references.
 */
int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head);

void gnttab_free_grant_reference(grant_ref_t ref);

void gnttab_free_grant_references(grant_ref_t head);

int gnttab_empty_grant_references(const grant_ref_t *pprivate_head);

int gnttab_claim_grant_reference(grant_ref_t *pprivate_head);

void gnttab_release_grant_reference(grant_ref_t *private_head,
				    grant_ref_t release);

void gnttab_request_free_callback(struct gnttab_free_callback *callback,
				  void (*fn)(void *), void *arg, u16 count);
void gnttab_cancel_free_callback(struct gnttab_free_callback *callback);

void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
				     unsigned long frame, int readonly);
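
/*
 * Example (illustrative sketch): pre-allocate a batch of grant references
 * and claim them one at a time, e.g. while filling a shared ring.
 * "otherend_id" and "gfn" are hypothetical values supplied by the caller:
 *
 *	grant_ref_t gref_head;
 *	int ref;
 *
 *	if (gnttab_alloc_grant_references(16, &gref_head) < 0)
 *		return -ENOSPC;
 *	ref = gnttab_claim_grant_reference(&gref_head);
 *	if (ref < 0)
 *		goto out;
 *	gnttab_grant_foreign_access_ref(ref, otherend_id, gfn, 0);
 *	...
 *	gnttab_free_grant_references(gref_head);
 */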

/* Give access to the first 4K of the page */
static inline void gnttab_page_grant_foreign_access_ref_one(
	grant_ref_t ref, domid_t domid,
	struct page *page, int readonly)
{
	gnttab_grant_foreign_access_ref(ref, domid, xen_page_to_gfn(page),
					readonly);
}

void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
				       unsigned long pfn);

static inline void
gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
		  uint32_t flags, grant_ref_t ref, domid_t domid)
{
	if (flags & GNTMAP_contains_pte)
		map->host_addr = addr;
	else if (xen_feature(XENFEAT_auto_translated_physmap))
		map->host_addr = __pa(addr);
	else
		map->host_addr = addr;

	map->flags = flags;
	map->ref = ref;
	map->dom = domid;
	map->status = 1; /* arbitrary positive value */
}

static inline void
gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr,
		    uint32_t flags, grant_handle_t handle)
{
	if (flags & GNTMAP_contains_pte)
		unmap->host_addr = addr;
	else if (xen_feature(XENFEAT_auto_translated_physmap))
		unmap->host_addr = __pa(addr);
	else
		unmap->host_addr = addr;

	unmap->handle = handle;
	unmap->dev_bus_addr = 0;
}
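
/*
 * Example (illustrative sketch): map a foreign grant into a locally
 * allocated page and unmap it again. "gref" and "otherend_id" are
 * hypothetical values supplied by the grant's owner:
 *
 *	struct gnttab_map_grant_ref map;
 *	struct gnttab_unmap_grant_ref unmap;
 *	struct page *page;
 *	phys_addr_t addr;
 *
 *	if (gnttab_alloc_pages(1, &page))
 *		return -ENOMEM;
 *	addr = (phys_addr_t)(unsigned long)page_address(page);
 *	gnttab_set_map_op(&map, addr, GNTMAP_host_map, gref, otherend_id);
 *	if (gnttab_map_refs(&map, NULL, &page, 1) || map.status != GNTST_okay)
 *		goto fail;
 *	...
 *	gnttab_set_unmap_op(&unmap, addr, GNTMAP_host_map, map.handle);
 *	gnttab_unmap_refs(&unmap, NULL, &page, 1);
 */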

int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status);
int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes,
			   unsigned long max_nr_gframes,
			   void **__shared);
int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
			   unsigned long max_nr_gframes,
			   grant_status_t **__shared);
void arch_gnttab_unmap(void *shared, unsigned long nr_gframes);

struct grant_frames {
	xen_pfn_t *pfn;
	unsigned int count;
	void *vaddr;
};
extern struct grant_frames xen_auto_xlat_grant_frames;
unsigned int gnttab_max_grant_frames(void);
int gnttab_setup_auto_xlat_frames(phys_addr_t addr);
void gnttab_free_auto_xlat_frames(void);

#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))

int gnttab_alloc_pages(int nr_pages, struct page **pages);
void gnttab_free_pages(int nr_pages, struct page **pages);

struct gnttab_page_cache {
	spinlock_t lock;
#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
	struct page *pages;
#else
	struct list_head pages;
#endif
	unsigned int num_pages;
};

void gnttab_page_cache_init(struct gnttab_page_cache *cache);
int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page);
void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
			   unsigned int num);
void gnttab_page_cache_shrink(struct gnttab_page_cache *cache,
			      unsigned int num);
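
/*
 * Example (illustrative sketch): a backend driver keeping a small cache of
 * pages that are repeatedly used as targets for grant mappings:
 *
 *	struct gnttab_page_cache cache;
 *	struct page *page;
 *
 *	gnttab_page_cache_init(&cache);
 *	if (gnttab_page_cache_get(&cache, &page))
 *		return -ENOMEM;
 *	... map a grant into the page, use it, unmap it again ...
 *	gnttab_page_cache_put(&cache, &page, 1);
 *
 *	gnttab_page_cache_shrink(&cache, 16);
 *
 * The final call trims the cache down to at most 16 spare pages.
 */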

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
struct gnttab_dma_alloc_args {
	/* Device for which DMA memory will be/was allocated. */
	struct device *dev;
	/* If set, the DMA buffer is coherent; otherwise it is write-combine. */
	bool coherent;

	int nr_pages;
	struct page **pages;
	xen_pfn_t *frames;
	void *vaddr;
	dma_addr_t dev_bus_addr;
};

int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args);
int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args);
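
/*
 * Example (illustrative sketch): allocate DMA-able pages that can then be
 * granted to another domain. "dev", "nr", "pages" and "frames" are assumed
 * to be provided by the caller, with the pages/frames arrays holding nr
 * entries each:
 *
 *	struct gnttab_dma_alloc_args args = {
 *		.dev = dev,
 *		.coherent = true,
 *		.nr_pages = nr,
 *		.pages = pages,
 *		.frames = frames,
 *	};
 *
 *	if (gnttab_dma_alloc_pages(&args))
 *		return -ENOMEM;
 *	... grant args.frames[i]; DMA via args.dev_bus_addr / args.vaddr ...
 *	gnttab_dma_free_pages(&args);
 */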
#endif

int gnttab_pages_set_private(int nr_pages, struct page **pages);
void gnttab_pages_clear_private(int nr_pages, struct page **pages);

int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count);
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_unmap_grant_ref *kunmap_ops,
		      struct page **pages, unsigned int count);
void gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item);
int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item);
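
/*
 * Example (illustrative sketch): synchronously unmap a batch of grants.
 * "unmap_ops", "pages" and "count" are assumed to have been prepared with
 * gnttab_set_unmap_op() as above. For gnttab_unmap_refs_async() the item
 * must instead stay allocated until its done callback has run:
 *
 *	struct gntab_unmap_queue_data unmap_data = {
 *		.unmap_ops = unmap_ops,
 *		.kunmap_ops = NULL,
 *		.pages = pages,
 *		.count = count,
 *	};
 *
 *	gnttab_unmap_refs_sync(&unmap_data);
 */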

/* Perform a batch of grant map/copy operations. Retry every batch slot
 * for which the hypervisor returns GNTST_eagain. This is typically due
 * to paged out target frames.
 *
 * Will retry for 1, 2, ... 255 ms, i.e. 256 times during 32 seconds.
 *
 * The value returned in each and every status field of the batch is
 * guaranteed to not be GNTST_eagain.
 */
void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count);
void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count);
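
/*
 * Example (illustrative sketch): copy a local buffer into a grant provided
 * by the other end, with GNTST_eagain retried transparently. "buf", "len",
 * "gref" and "otherend_id" are hypothetical caller-supplied values, with
 * len small enough to stay within one grant:
 *
 *	struct gnttab_copy op = {
 *		.flags = GNTCOPY_dest_gref,
 *		.len = len,
 *		.source.domid = DOMID_SELF,
 *		.source.u.gmfn = virt_to_gfn(buf),
 *		.source.offset = xen_offset_in_page(buf),
 *		.dest.domid = otherend_id,
 *		.dest.u.ref = gref,
 *	};
 *
 *	gnttab_batch_copy(&op, 1);
 *	if (op.status != GNTST_okay)
 *		return -EIO;
 */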

struct xen_page_foreign {
	domid_t domid;
	grant_ref_t gref;
};

static inline struct xen_page_foreign *xen_page_foreign(struct page *page)
{
	if (!PageForeign(page))
		return NULL;
#if BITS_PER_LONG < 64
	return (struct xen_page_foreign *)page->private;
#else
	BUILD_BUG_ON(sizeof(struct xen_page_foreign) > BITS_PER_LONG);
	return (struct xen_page_foreign *)&page->private;
#endif
}

/* Split a Linux page into grant-sized chunks and call fn on each chunk
 *
 * Parameters of fn:
 *	gfn: guest frame number
 *	offset: offset in the grant
 *	len: length of the data in the grant
 *	data: internal information
 */
typedef void (*xen_grant_fn_t)(unsigned long gfn, unsigned int offset,
			       unsigned int len, void *data);

void gnttab_foreach_grant_in_range(struct page *page,
				   unsigned int offset,
				   unsigned int len,
				   xen_grant_fn_t fn,
				   void *data);
/* Helper to call fn only on the first "grant chunk" */
static inline void gnttab_for_one_grant(struct page *page, unsigned int offset,
					unsigned len, xen_grant_fn_t fn,
					void *data)
{
	/* The first request is limited to the size of one grant */
	len = min_t(unsigned int, XEN_PAGE_SIZE - (offset & ~XEN_PAGE_MASK),
		    len);

	gnttab_foreach_grant_in_range(page, offset, len, fn, data);
}

/* Get @nr_grefs grants from an array of pages and call fn for each grant */
void gnttab_foreach_grant(struct page **pages,
			  unsigned int nr_grefs,
			  xen_grant_fn_t fn,
			  void *data);

/* Get the number of grants in a specified region
 *
 * start: Offset from the beginning of the first page
 * len: total length of data (can cross multiple pages)
 */
static inline unsigned int gnttab_count_grant(unsigned int start,
					      unsigned int len)
{
	return XEN_PFN_UP(xen_offset_in_page(start) + len);
}
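
/*
 * Example (illustrative sketch): with XEN_PAGE_SIZE == 4096, a buffer that
 * starts at offset 4000 into its first page and is 200 bytes long spans two
 * grants, so gnttab_count_grant(4000, 200) == 2. A hypothetical caller can
 * reserve that many request slots and, for data contained in a single
 * Linux page, emit one request per grant chunk:
 *
 *	unsigned int nr = gnttab_count_grant(offset, len);
 *
 *	... reserve nr slots in the ring ...
 *	gnttab_foreach_grant_in_range(page, offset, len, fill_request, &ctx);
 *
 * where "fill_request" and "ctx" are caller-defined.
 */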

#endif /* __ASM_GNTTAB_H__ */