/******************************************************************************
 * Client-facing interface for the Xenbus driver. In other words, the
 * interface between the Xenbus and the device-specific code, be it the
 * frontend or the backend of that driver.
 *
 * Copyright (C) 2005 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/xen.h>
#include <xen/features.h>

#include "xenbus.h"

#define XENBUS_PAGES(_grants)	(DIV_ROUND_UP(_grants, XEN_PFN_PER_PAGE))

#define XENBUS_MAX_RING_PAGES	(XENBUS_PAGES(XENBUS_MAX_RING_GRANTS))
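
/*
 * Worked example (illustrative, assuming the usual 4 KiB XEN_PAGE_SIZE and
 * XENBUS_MAX_RING_GRANTS of 16): with 4 KiB kernel pages XEN_PFN_PER_PAGE
 * is 1, so 16 grants need 16 kernel pages; with 64 KiB kernel pages (e.g.
 * some arm64 configs) XEN_PFN_PER_PAGE is 16 and the same 16 grants fit in
 * a single kernel page, making XENBUS_MAX_RING_PAGES equal to 1.
 */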

struct xenbus_map_node {
	struct list_head next;
	union {
		struct {
			struct vm_struct *area;
		} pv;
		struct {
			struct page *pages[XENBUS_MAX_RING_PAGES];
			unsigned long addrs[XENBUS_MAX_RING_GRANTS];
			void *addr;
		} hvm;
	};
	grant_handle_t handles[XENBUS_MAX_RING_GRANTS];
	unsigned int nr_handles;
};

struct map_ring_valloc {
	struct xenbus_map_node *node;

	/* Why do we need two arrays? See comment of __xenbus_map_ring */
	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];

	struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];

	unsigned int idx;
};

static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);

struct xenbus_ring_ops {
	int (*map)(struct xenbus_device *dev, struct map_ring_valloc *info,
		   grant_ref_t *gnt_refs, unsigned int nr_grefs,
		   void **vaddr);
	int (*unmap)(struct xenbus_device *dev, void *vaddr);
};

static const struct xenbus_ring_ops *ring_ops __read_mostly;

const char *xenbus_strstate(enum xenbus_state state)
{
	static const char *const name[] = {
		[ XenbusStateUnknown ] = "Unknown",
		[ XenbusStateInitialising ] = "Initialising",
		[ XenbusStateInitWait ] = "InitWait",
		[ XenbusStateInitialised ] = "Initialised",
		[ XenbusStateConnected ] = "Connected",
		[ XenbusStateClosing ] = "Closing",
		[ XenbusStateClosed ] = "Closed",
		[XenbusStateReconfiguring] = "Reconfiguring",
		[XenbusStateReconfigured] = "Reconfigured",
	};
	return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
}
EXPORT_SYMBOL_GPL(xenbus_strstate);

/**
 * xenbus_watch_path - register a watch
 * @dev: xenbus device
 * @path: path to watch
 * @watch: watch to register
 * @will_handle: optional callback that decides whether an event on the
 *	watched path will be queued for handling
 * @callback: callback to register
 *
 * Register a @watch on the given path, using the given xenbus_watch structure
 * for storage, and the given @callback function as the callback. Return 0 on
 * success, or -errno on error. On success, the given @path will be saved as
 * @watch->node, and remains the caller's to free. On error, @watch->node will
 * be NULL, the device will switch to %XenbusStateClosing, and the error will
 * be saved in the store.
 */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
		      struct xenbus_watch *watch,
		      bool (*will_handle)(struct xenbus_watch *,
					  const char *, const char *),
		      void (*callback)(struct xenbus_watch *,
				       const char *, const char *))
{
	int err;

	watch->node = path;
	watch->will_handle = will_handle;
	watch->callback = callback;

	err = register_xenbus_watch(watch);

	if (err) {
		watch->node = NULL;
		watch->will_handle = NULL;
		watch->callback = NULL;
		xenbus_dev_fatal(dev, err, "adding watch on %s", path);
	}

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);
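
/*
 * Illustrative usage sketch, not part of this file: a frontend watching its
 * backend's state node. The names "my_front", "my_watch" and
 * "my_otherend_changed" are hypothetical driver-side identifiers.
 *
 *	static void my_otherend_changed(struct xenbus_watch *watch,
 *					const char *path, const char *token)
 *	{
 *		... react to a change of the watched node ...
 *	}
 *
 *	err = xenbus_watch_path(dev, dev->otherend, &my_front->my_watch,
 *				NULL, my_otherend_changed);
 *	if (err)
 *		return err;	... already reported via xenbus_dev_fatal() ...
 */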


/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * @dev: xenbus device
 * @watch: watch to register
 * @will_handle: optional callback that decides whether an event on the
 *	watched path will be queued for handling
 * @callback: callback to register
 * @pathfmt: format of path to watch
 *
 * Register a watch on the given @pathfmt, using the given xenbus_watch
 * structure for storage, and the given @callback function as the callback.
 * Return 0 on success, or -errno on error. On success, the watched path
 * (generated from @pathfmt) will be saved as @watch->node, and becomes the
 * caller's to kfree(). On error, watch->node will be NULL, so the caller has
 * nothing to free, the device will switch to %XenbusStateClosing, and the
 * error will be saved in the store.
 */
int xenbus_watch_pathfmt(struct xenbus_device *dev,
			 struct xenbus_watch *watch,
			 bool (*will_handle)(struct xenbus_watch *,
					     const char *, const char *),
			 void (*callback)(struct xenbus_watch *,
					  const char *, const char *),
			 const char *pathfmt, ...)
{
	int err;
	va_list ap;
	char *path;

	va_start(ap, pathfmt);
	path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
	va_end(ap);

	if (!path) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
		return -ENOMEM;
	}
	err = xenbus_watch_path(dev, path, watch, will_handle, callback);

	if (err)
		kfree(path);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
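
/*
 * Illustrative usage sketch (hypothetical names): watching a node under the
 * device's own directory, letting xenbus format and own the path string.
 *
 *	err = xenbus_watch_pathfmt(dev, &be->backend_watch, NULL,
 *				   backend_changed,
 *				   "%s/%s", dev->nodename, "hotplug-status");
 *	if (err)
 *		goto fail;	... the path was already kfree()d for us ...
 */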

static void xenbus_switch_fatal(struct xenbus_device *, int, int,
				const char *, ...);

static int
__xenbus_switch_state(struct xenbus_device *dev,
		      enum xenbus_state state, int depth)
{
	/* We check whether the state is currently set to the given value, and
	   if not, then the state is set. We don't want to unconditionally
	   write the given state, because we don't want to fire watches
	   unnecessarily. Furthermore, if the node has gone, we don't write
	   to it, as the device will be tearing down, and we don't want to
	   resurrect that directory.

	   Note that, because of this cached value of our state, this
	   function will not take a caller's Xenstore transaction
	   (something it was trying to in the past) because dev->state
	   would not get reset if the transaction was aborted.
	 */

	struct xenbus_transaction xbt;
	int current_state;
	int err, abort;

	if (state == dev->state)
		return 0;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "starting transaction");
		return 0;
	}

	err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
	if (err != 1)
		goto abort;

	err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "writing new state");
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		xenbus_switch_fatal(dev, depth, err, "ending transaction");
	} else
		dev->state = state;

	return 0;
}

/**
 * xenbus_switch_state - save the new state of a driver
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given @state.
 * Return 0 on success, or -errno on error. On error, the device will switch
 * to XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
	return __xenbus_switch_state(dev, state, 0);
}

EXPORT_SYMBOL_GPL(xenbus_switch_state);

int xenbus_frontend_closed(struct xenbus_device *dev)
{
	xenbus_switch_state(dev, XenbusStateClosed);
	complete(&dev->down);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);
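
/*
 * Illustrative usage sketch (hypothetical flow): advertising state changes
 * from a frontend's otherend-changed handler.
 *
 *	switch (backend_state) {
 *	case XenbusStateInitWait:
 *		... set up rings and the event channel ...
 *		xenbus_switch_state(dev, XenbusStateInitialised);
 *		break;
 *	case XenbusStateClosed:
 *		xenbus_frontend_closed(dev);	... also completes &dev->down ...
 *		break;
 *	...
 *	}
 */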

static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
				const char *fmt, va_list ap)
{
	unsigned int len;
	char *printf_buffer;
	char *path_buffer;

#define PRINTF_BUFFER_SIZE 4096

	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
	if (!printf_buffer)
		return;

	len = sprintf(printf_buffer, "%i ", -err);
	vsnprintf(printf_buffer + len, PRINTF_BUFFER_SIZE - len, fmt, ap);

	dev_err(&dev->dev, "%s\n", printf_buffer);

	path_buffer = kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
	if (path_buffer)
		xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer);

	kfree(printf_buffer);
	kfree(path_buffer);
}

/**
 * xenbus_dev_error - place an error message into the store
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);

/**
 * xenbus_dev_fatal - put an error message into the store and then shut down
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
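
/*
 * Illustrative usage sketch (hypothetical node name): reading a required key
 * from the peer's directory and failing the connection if it is absent.
 *
 *	unsigned int ring_ref;
 *
 *	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref", "%u", &ring_ref);
 *	if (err != 1) {
 *		xenbus_dev_fatal(dev, err < 0 ? err : -EINVAL,
 *				 "reading %s/ring-ref", dev->otherend);
 *		return;
 *	}
 */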

/*
 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
 * avoid recursion within xenbus_switch_state.
 */
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
				const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	if (!depth)
		__xenbus_switch_state(dev, XenbusStateClosing, 1);
}

/**
 * xenbus_grant_ring - grant access to a ring page
 * @dev: xenbus device
 * @vaddr: starting virtual address of the ring
 * @nr_pages: number of pages to be granted
 * @grefs: grant reference array to be filled in
 *
 * Grant access to the given @vaddr to the peer of the given device.
 * Then fill in @grefs with grant references. Return 0 on success, or
 * -errno on error. On error, the device will switch to
 * XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
		      unsigned int nr_pages, grant_ref_t *grefs)
{
	int err;
	unsigned int i;
	grant_ref_t gref_head;

	err = gnttab_alloc_grant_references(nr_pages, &gref_head);
	if (err) {
		xenbus_dev_fatal(dev, err, "granting access to ring page");
		return err;
	}

	for (i = 0; i < nr_pages; i++) {
		unsigned long gfn;

		if (is_vmalloc_addr(vaddr))
			gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr));
		else
			gfn = virt_to_gfn(vaddr);

		grefs[i] = gnttab_claim_grant_reference(&gref_head);
		gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id,
						gfn, 0);

		vaddr = vaddr + XEN_PAGE_SIZE;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);
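
/*
 * Illustrative usage sketch (hypothetical ring type): sharing a one-page
 * ring with the peer. Error handling trimmed for brevity.
 *
 *	struct my_sring *sring;
 *	grant_ref_t gref[1];
 *
 *	sring = (void *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
 *	err = xenbus_grant_ring(dev, sring, 1, gref);
 *	if (err)
 *		goto fail;	... device is already switching to Closing ...
 *	... advertise gref[0] under dev->nodename via xenbus_printf() ...
 */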


/**
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *port. Return 0 on success, or -errno on error. On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_alloc_evtchn(struct xenbus_device *dev, evtchn_port_t *port)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err;

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = dev->otherend_id;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err)
		xenbus_dev_fatal(dev, err, "allocating event channel");
	else
		*port = alloc_unbound.port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
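
/*
 * Illustrative usage sketch (hypothetical handler and cookie): allocating an
 * unbound event channel for the peer, binding it locally, and freeing it on
 * the failure path.
 *
 *	evtchn_port_t port;
 *
 *	err = xenbus_alloc_evtchn(dev, &port);
 *	if (err)
 *		return err;
 *
 *	err = bind_evtchn_to_irqhandler(port, my_interrupt, 0,
 *					"my-device", my_info);
 *	if (err < 0) {
 *		xenbus_free_evtchn(dev, port);
 *		return err;
 *	}
 *	my_info->irq = err;	... then advertise port in the store ...
 */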


/**
 * Free an existing event channel. Returns 0 on success or -errno on error.
 */
int xenbus_free_evtchn(struct xenbus_device *dev, evtchn_port_t port)
{
	struct evtchn_close close;
	int err;

	close.port = port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (err)
		xenbus_dev_error(dev, err, "freeing event channel %u", port);

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);


/**
 * xenbus_map_ring_valloc - allocate & map pages of virtual address space
 * @dev: xenbus device
 * @gnt_refs: grant reference array
 * @nr_grefs: number of grant references
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Map @nr_grefs pages of memory into this domain from another
 * domain's grant table. xenbus_map_ring_valloc allocates @nr_grefs
 * pages of virtual address space, maps the pages to that address, and
 * sets *vaddr to that address. Returns 0 on success, and -errno on
 * error. If an error is returned, the device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
			   unsigned int nr_grefs, void **vaddr)
{
	int err;
	struct map_ring_valloc *info;

	*vaddr = NULL;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->node = kzalloc(sizeof(*info->node), GFP_KERNEL);
	if (!info->node)
		err = -ENOMEM;
	else
		err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr);

	kfree(info->node);
	kfree(info);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
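
/*
 * Illustrative usage sketch (hypothetical backend): mapping the ring page a
 * frontend advertised. ring_ref would have been read from the store first.
 *
 *	grant_ref_t ring_ref;
 *	void *ring;
 *
 *	err = xenbus_map_ring_valloc(dev, &ring_ref, 1, &ring);
 *	if (err)
 *		return err;
 *	... use ring; later release it with xenbus_unmap_ring_vfree(dev, ring) ...
 */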

/* N.B. sizeof(phys_addr_t) doesn't always equal sizeof(unsigned
 * long), e.g. 32-on-64. The caller is responsible for preparing the
 * right array to feed into this function. */
static int __xenbus_map_ring(struct xenbus_device *dev,
			     grant_ref_t *gnt_refs,
			     unsigned int nr_grefs,
			     grant_handle_t *handles,
			     struct map_ring_valloc *info,
			     unsigned int flags,
			     bool *leaked)
{
	int i, j;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	for (i = 0; i < nr_grefs; i++) {
		gnttab_set_map_op(&info->map[i], info->phys_addrs[i], flags,
				  gnt_refs[i], dev->otherend_id);
		handles[i] = INVALID_GRANT_HANDLE;
	}

	gnttab_batch_map(info->map, i);

	for (i = 0; i < nr_grefs; i++) {
		if (info->map[i].status != GNTST_okay) {
			xenbus_dev_fatal(dev, info->map[i].status,
					 "mapping in shared page %d from domain %d",
					 gnt_refs[i], dev->otherend_id);
			goto fail;
		} else
			handles[i] = info->map[i].handle;
	}

	return 0;

fail:
	for (i = j = 0; i < nr_grefs; i++) {
		if (handles[i] != INVALID_GRANT_HANDLE) {
			gnttab_set_unmap_op(&info->unmap[j],
					    info->phys_addrs[i],
					    GNTMAP_host_map, handles[i]);
			j++;
		}
	}

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, info->unmap, j))
		BUG();

	*leaked = false;
	for (i = 0; i < j; i++) {
		if (info->unmap[i].status != GNTST_okay) {
			*leaked = true;
			break;
		}
	}

	return -ENOENT;
}

/**
 * xenbus_unmap_ring - unmap memory imported from another domain
 * @dev: xenbus device
 * @handles: grant handle array
 * @nr_handles: number of handles in the array
 * @vaddrs: addresses to unmap
 *
 * Unmap memory in this domain that was imported from another domain.
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
static int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t *handles,
			     unsigned int nr_handles, unsigned long *vaddrs)
{
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
	int i;
	int err;

	if (nr_handles > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	for (i = 0; i < nr_handles; i++)
		gnttab_set_unmap_op(&unmap[i], vaddrs[i],
				    GNTMAP_host_map, handles[i]);

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
		BUG();

	err = GNTST_okay;
	for (i = 0; i < nr_handles; i++) {
		if (unmap[i].status != GNTST_okay) {
			xenbus_dev_error(dev, unmap[i].status,
					 "unmapping page at handle %d error %d",
					 handles[i], unmap[i].status);
			err = unmap[i].status;
			break;
		}
	}

	return err;
}

static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
					    unsigned int goffset,
					    unsigned int len,
					    void *data)
{
	struct map_ring_valloc *info = data;
	unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);

	info->phys_addrs[info->idx] = vaddr;
	info->addrs[info->idx] = vaddr;

	info->idx++;
}

static int xenbus_map_ring_hvm(struct xenbus_device *dev,
			       struct map_ring_valloc *info,
			       grant_ref_t *gnt_ref,
			       unsigned int nr_grefs,
			       void **vaddr)
{
	struct xenbus_map_node *node = info->node;
	int err;
	void *addr;
	bool leaked = false;
	unsigned int nr_pages = XENBUS_PAGES(nr_grefs);

	err = xen_alloc_unpopulated_pages(nr_pages, node->hvm.pages);
	if (err)
		goto out_err;

	gnttab_foreach_grant(node->hvm.pages, nr_grefs,
			     xenbus_map_ring_setup_grant_hvm,
			     info);

	err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
				info, GNTMAP_host_map, &leaked);
	node->nr_handles = nr_grefs;

	if (err)
		goto out_free_ballooned_pages;

	addr = vmap(node->hvm.pages, nr_pages, VM_MAP | VM_IOREMAP,
		    PAGE_KERNEL);
	if (!addr) {
		err = -ENOMEM;
		goto out_xenbus_unmap_ring;
	}

	node->hvm.addr = addr;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = addr;
	info->node = NULL;

	return 0;

out_xenbus_unmap_ring:
	if (!leaked)
		xenbus_unmap_ring(dev, node->handles, nr_grefs, info->addrs);
	else
		pr_alert("leaking %p size %u page(s)",
			 addr, nr_pages);
out_free_ballooned_pages:
	if (!leaked)
		xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
out_err:
	return err;
}

/**
 * xenbus_unmap_ring_vfree - unmap a ring mapped with xenbus_map_ring_valloc
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Based on Rusty Russell's skeleton driver's unmap_page.
 * Unmap a page of memory in this domain that was imported from another domain.
 * Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	return ring_ops->unmap(dev, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
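
/*
 * Illustrative teardown sketch (hypothetical field names), pairing with the
 * xenbus_map_ring_valloc() sketch above:
 *
 *	if (be->ring) {
 *		xenbus_unmap_ring_vfree(dev, be->ring);
 *		be->ring = NULL;
 *	}
 */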
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) #ifdef CONFIG_XEN_PV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) static int map_ring_apply(pte_t *pte, unsigned long addr, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) struct map_ring_valloc *info = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) info->phys_addrs[info->idx++] = arbitrary_virt_to_machine(pte).maddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) static int xenbus_map_ring_pv(struct xenbus_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) struct map_ring_valloc *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) grant_ref_t *gnt_refs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) unsigned int nr_grefs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) void **vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) struct xenbus_map_node *node = info->node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) struct vm_struct *area;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) bool leaked = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) int err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) area = get_vm_area(XEN_PAGE_SIZE * nr_grefs, VM_IOREMAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) if (!area)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) XEN_PAGE_SIZE * nr_grefs, map_ring_apply, info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) info, GNTMAP_host_map | GNTMAP_contains_pte,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) &leaked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) node->nr_handles = nr_grefs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) node->pv.area = area;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) spin_lock(&xenbus_valloc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) list_add(&node->next, &xenbus_valloc_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) spin_unlock(&xenbus_valloc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) *vaddr = area->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) info->node = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) if (!leaked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) free_vm_area(area);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) static int xenbus_unmap_ring_pv(struct xenbus_device *dev, void *vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) struct xenbus_map_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) unsigned int level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) bool leaked = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) spin_lock(&xenbus_valloc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) list_for_each_entry(node, &xenbus_valloc_pages, next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) if (node->pv.area->addr == vaddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) list_del(&node->next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) goto found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) node = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) found:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) spin_unlock(&xenbus_valloc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) if (!node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) xenbus_dev_error(dev, -ENOENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) "can't find mapped virtual address %p", vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) return GNTST_bad_virt_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) for (i = 0; i < node->nr_handles; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) unsigned long addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) memset(&unmap[i], 0, sizeof(unmap[i]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) addr = (unsigned long)vaddr + (XEN_PAGE_SIZE * i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) unmap[i].host_addr = arbitrary_virt_to_machine(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) lookup_address(addr, &level)).maddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) unmap[i].dev_bus_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) unmap[i].handle = node->handles[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) err = GNTST_okay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) leaked = false;
	for (i = 0; i < node->nr_handles; i++) {
		if (unmap[i].status != GNTST_okay) {
			leaked = true;
			xenbus_dev_error(dev, unmap[i].status,
					 "unmapping page at handle %d error %d",
					 node->handles[i], unmap[i].status);
			err = unmap[i].status;
			break;
		}
	}

	if (!leaked)
		free_vm_area(node->pv.area);
	else
		pr_alert("leaking VM area %p size %u page(s)\n",
			 node->pv.area, node->nr_handles);

	kfree(node);
	return err;
}

static const struct xenbus_ring_ops ring_ops_pv = {
	.map = xenbus_map_ring_pv,
	.unmap = xenbus_unmap_ring_pv,
};
#endif

struct unmap_ring_hvm {
	unsigned int idx;
	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
};

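/*
 * gnttab_foreach_grant() callback: record the kernel virtual address
 * backing each granted frame, so the caller can hand a per-page address
 * array to xenbus_unmap_ring().
 */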
static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn,
					      unsigned int goffset,
					      unsigned int len,
					      void *data)
{
	struct unmap_ring_hvm *info = data;

	info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);

	info->idx++;
}

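/*
 * HVM/PVH counterpart of the PV unmap path: look up the tracking node for
 * @vaddr, unmap the grants, then release the vmap() alias and the
 * unpopulated backing pages.
 */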
static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr)
{
	int rv;
	struct xenbus_map_node *node;
	void *addr;
	struct unmap_ring_hvm info = {
		.idx = 0,
	};
	unsigned int nr_pages;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		addr = node->hvm.addr;
		if (addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = addr = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	nr_pages = XENBUS_PAGES(node->nr_handles);

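	/* Collect the virtual address of every granted frame. */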
	gnttab_foreach_grant(node->hvm.pages, node->nr_handles,
			     xenbus_unmap_ring_setup_grant_hvm,
			     &info);

	rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
			       info.addrs);
	if (!rv) {
		vunmap(vaddr);
		xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
	} else {
		WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages);
	}

	kfree(node);
	return rv;
}

/**
 * xenbus_read_driver_state - read the Xenbus state of a driver
 * @path: XenStore path of the device ("state" is read relative to it)
 *
 * Return: the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
	enum xenbus_state result;
	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);

	if (err)
		result = XenbusStateUnknown;

	return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);

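/*
 * Example usage (illustrative sketch only): a frontend driver reacting to
 * a backend state change might call this as below, where "dev" is assumed
 * to be the calling driver's struct xenbus_device:
 *
 *	enum xenbus_state backend_state =
 *		xenbus_read_driver_state(dev->otherend);
 *
 *	if (backend_state == XenbusStateClosing ||
 *	    backend_state == XenbusStateClosed)
 *		...;	// tear down shared rings, move to XenbusStateClosed
 */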
static const struct xenbus_ring_ops ring_ops_hvm = {
	.map = xenbus_map_ring_hvm,
	.unmap = xenbus_unmap_ring_hvm,
};

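/*
 * Select the grant-mapping implementation once at boot: PV domains (no
 * auto-translated physmap) map grants through page-table entries, while
 * auto-translated domains (HVM/PVH) map them into unpopulated pages.
 */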
void __init xenbus_ring_ops_init(void)
{
#ifdef CONFIG_XEN_PV
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		ring_ops = &ring_ops_pv;
	else
#endif
		ring_ops = &ring_ops_hvm;
}