/******************************************************************************
 * xenbus_xs.c
 *
 * This is the kernel equivalent of the "xs" library. We don't need everything
 * and we use xenbus_comms for communication.
 *
 * Copyright (C) 2005 Rusty Russell, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/unistd.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uio.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/kthread.h>
#include <linux/reboot.h>
#include <linux/rwsem.h>
#include <linux/mutex.h>
#include <asm/xen/hypervisor.h>
#include <xen/xenbus.h>
#include <xen/xen.h>
#include "xenbus.h"

/*
 * Framework to protect suspend/resume handling against normal Xenstore
 * message handling:
 * During suspend/resume there must be no open transaction and no pending
 * Xenstore request.
 * New watch events arriving during this time can safely be ignored, as all
 * watches are fired again after resume.
 */

/* Lock protecting enter/exit critical region. */
static DEFINE_SPINLOCK(xs_state_lock);
/* Number of users in critical region (protected by xs_state_lock). */
static unsigned int xs_state_users;
/* Suspend handler waiting or already active (protected by xs_state_lock)? */
static int xs_suspend_active;
/* Unique Xenstore request id (protected by xs_state_lock). */
static uint32_t xs_request_id;

/* Wait queue for all callers waiting for critical region to become usable. */
static DECLARE_WAIT_QUEUE_HEAD(xs_state_enter_wq);
/* Wait queue for suspend handling waiting for critical region being empty. */
static DECLARE_WAIT_QUEUE_HEAD(xs_state_exit_wq);

/* List of registered watches, and a lock to protect it. */
static LIST_HEAD(watches);
static DEFINE_SPINLOCK(watches_lock);

/* List of pending watch callback events, and a lock to protect it. */
static LIST_HEAD(watch_events);
static DEFINE_SPINLOCK(watch_events_lock);

/* Protect watch (de)register against save/restore. */
static DECLARE_RWSEM(xs_watch_rwsem);

/*
 * Details of the xenwatch callback kernel thread. The thread waits on the
 * watch_events_waitq for work to do (queued on watch_events list). When it
 * wakes up it acquires the xenwatch_mutex before reading the list and
 * carrying out work.
 */
static pid_t xenwatch_pid;
static DEFINE_MUTEX(xenwatch_mutex);
static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq);

static void xs_suspend_enter(void)
{
	spin_lock(&xs_state_lock);
	xs_suspend_active++;
	spin_unlock(&xs_state_lock);
	wait_event(xs_state_exit_wq, xs_state_users == 0);
}

static void xs_suspend_exit(void)
{
	xb_dev_generation_id++;
	spin_lock(&xs_state_lock);
	xs_suspend_active--;
	spin_unlock(&xs_state_lock);
	wake_up_all(&xs_state_enter_wq);
}

static uint32_t xs_request_enter(struct xb_req_data *req)
{
	uint32_t rq_id;

	req->type = req->msg.type;

	spin_lock(&xs_state_lock);

	while (!xs_state_users && xs_suspend_active) {
		spin_unlock(&xs_state_lock);
		wait_event(xs_state_enter_wq, xs_suspend_active == 0);
		spin_lock(&xs_state_lock);
	}

	if (req->type == XS_TRANSACTION_START && !req->user_req)
		xs_state_users++;
	xs_state_users++;
	rq_id = xs_request_id++;

	spin_unlock(&xs_state_lock);

	return rq_id;
}

void xs_request_exit(struct xb_req_data *req)
{
	spin_lock(&xs_state_lock);
	xs_state_users--;
	if ((req->type == XS_TRANSACTION_START && req->msg.type == XS_ERROR) ||
	    (req->type == XS_TRANSACTION_END && !req->user_req &&
	     !WARN_ON_ONCE(req->msg.type == XS_ERROR &&
			   !strcmp(req->body, "ENOENT"))))
		xs_state_users--;
	spin_unlock(&xs_state_lock);

	if (xs_suspend_active && !xs_state_users)
		wake_up(&xs_state_exit_wq);
}

static int get_error(const char *errorstring)
{
	unsigned int i;

	for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) {
		if (i == ARRAY_SIZE(xsd_errors) - 1) {
			pr_warn("xen store gave: unknown error %s\n",
				errorstring);
			return EINVAL;
		}
	}
	return xsd_errors[i].errnum;
}

static bool xenbus_ok(void)
{
	switch (xen_store_domain_type) {
	case XS_LOCAL:
		switch (system_state) {
		case SYSTEM_POWER_OFF:
		case SYSTEM_RESTART:
		case SYSTEM_HALT:
			return false;
		default:
			break;
		}
		return true;
	case XS_PV:
	case XS_HVM:
		/* FIXME: Could check that the remote domain is alive,
		 * but it is normally the initial domain. */
		return true;
	default:
		break;
	}
	return false;
}

static bool test_reply(struct xb_req_data *req)
{
	if (req->state == xb_req_state_got_reply || !xenbus_ok()) {
		/* read req->state before all other fields */
		virt_rmb();
		return true;
	}

	/* Make sure to reread req->state each time. */
	barrier();

	return false;
}

static void *read_reply(struct xb_req_data *req)
{
	do {
		wait_event(req->wq, test_reply(req));

		if (!xenbus_ok())
			/*
			 * If we are in the process of being shut down there is
			 * no point in trying to contact XenBus - it is either
			 * killed (xenstored application) or the other domain
			 * has been killed or is unreachable.
			 */
			return ERR_PTR(-EIO);
		if (req->err)
			return ERR_PTR(req->err);

	} while (req->state != xb_req_state_got_reply);

	return req->body;
}

static void xs_send(struct xb_req_data *req, struct xsd_sockmsg *msg)
{
	bool notify;

	req->msg = *msg;
	req->err = 0;
	req->state = xb_req_state_queued;
	init_waitqueue_head(&req->wq);

	/* Save the caller req_id and restore it later in the reply */
	req->caller_req_id = req->msg.req_id;
	req->msg.req_id = xs_request_enter(req);

	mutex_lock(&xb_write_mutex);
	list_add_tail(&req->list, &xb_write_list);
	notify = list_is_singular(&xb_write_list);
	mutex_unlock(&xb_write_mutex);

	if (notify)
		wake_up(&xb_waitq);
}

static void *xs_wait_for_reply(struct xb_req_data *req, struct xsd_sockmsg *msg)
{
	void *ret;

	ret = read_reply(req);

	xs_request_exit(req);

	msg->type = req->msg.type;
	msg->len = req->msg.len;

	mutex_lock(&xb_write_mutex);
	if (req->state == xb_req_state_queued ||
	    req->state == xb_req_state_wait_reply)
		req->state = xb_req_state_aborted;
	else
		kfree(req);
	mutex_unlock(&xb_write_mutex);

	return ret;
}

static void xs_wake_up(struct xb_req_data *req)
{
	wake_up(&req->wq);
}

int xenbus_dev_request_and_reply(struct xsd_sockmsg *msg, void *par)
{
	struct xb_req_data *req;
	struct kvec *vec;

	req = kmalloc(sizeof(*req) + sizeof(*vec), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	vec = (struct kvec *)(req + 1);
	vec->iov_len = msg->len;
	vec->iov_base = msg + 1;

	req->vec = vec;
	req->num_vecs = 1;
	req->cb = xenbus_dev_queue_reply;
	req->par = par;
	req->user_req = true;

	xs_send(req, msg);

	return 0;
}
EXPORT_SYMBOL(xenbus_dev_request_and_reply);

/* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */
static void *xs_talkv(struct xenbus_transaction t,
		      enum xsd_sockmsg_type type,
		      const struct kvec *iovec,
		      unsigned int num_vecs,
		      unsigned int *len)
{
	struct xb_req_data *req;
	struct xsd_sockmsg msg;
	void *ret = NULL;
	unsigned int i;
	int err;

	req = kmalloc(sizeof(*req), GFP_NOIO | __GFP_HIGH);
	if (!req)
		return ERR_PTR(-ENOMEM);

	req->vec = iovec;
	req->num_vecs = num_vecs;
	req->cb = xs_wake_up;
	req->user_req = false;

	msg.req_id = 0;
	msg.tx_id = t.id;
	msg.type = type;
	msg.len = 0;
	for (i = 0; i < num_vecs; i++)
		msg.len += iovec[i].iov_len;

	xs_send(req, &msg);

	ret = xs_wait_for_reply(req, &msg);
	if (len)
		*len = msg.len;

	if (IS_ERR(ret))
		return ret;

	if (msg.type == XS_ERROR) {
		err = get_error(ret);
		kfree(ret);
		return ERR_PTR(-err);
	}

	if (msg.type != type) {
		pr_warn_ratelimited("unexpected type [%d], expected [%d]\n",
				    msg.type, type);
		kfree(ret);
		return ERR_PTR(-EINVAL);
	}
	return ret;
}

/* Simplified version of xs_talkv: single message. */
static void *xs_single(struct xenbus_transaction t,
		       enum xsd_sockmsg_type type,
		       const char *string,
		       unsigned int *len)
{
	struct kvec iovec;

	iovec.iov_base = (void *)string;
	iovec.iov_len = strlen(string) + 1;
	return xs_talkv(t, type, &iovec, 1, len);
}

/* Many commands only need an ack, don't care what it says. */
static int xs_error(char *reply)
{
	if (IS_ERR(reply))
		return PTR_ERR(reply);
	kfree(reply);
	return 0;
}

static unsigned int count_strings(const char *strings, unsigned int len)
{
	unsigned int num;
	const char *p;

	for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1)
		num++;

	return num;
}

/* Return the path to dir with /name appended. Buffer must be kfree()'ed. */
static char *join(const char *dir, const char *name)
{
	char *buffer;

	if (strlen(name) == 0)
		buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s", dir);
	else
		buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", dir, name);
	return (!buffer) ? ERR_PTR(-ENOMEM) : buffer;
}

static char **split(char *strings, unsigned int len, unsigned int *num)
{
	char *p, **ret;

	/* Count the strings. */
	*num = count_strings(strings, len);

	/* Transfer to one big alloc for easy freeing. */
	ret = kmalloc(*num * sizeof(char *) + len, GFP_NOIO | __GFP_HIGH);
	if (!ret) {
		kfree(strings);
		return ERR_PTR(-ENOMEM);
	}
	memcpy(&ret[*num], strings, len);
	kfree(strings);

	strings = (char *)&ret[*num];
	for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1)
		ret[(*num)++] = p;

	return ret;
}

char **xenbus_directory(struct xenbus_transaction t,
			const char *dir, const char *node, unsigned int *num)
{
	char *strings, *path;
	unsigned int len;

	path = join(dir, node);
	if (IS_ERR(path))
		return (char **)path;

	strings = xs_single(t, XS_DIRECTORY, path, &len);
	kfree(path);
	if (IS_ERR(strings))
		return (char **)strings;

	return split(strings, len, num);
}
EXPORT_SYMBOL_GPL(xenbus_directory);

/* Check if a path exists. Return 1 if it does. */
int xenbus_exists(struct xenbus_transaction t,
		  const char *dir, const char *node)
{
	char **d;
	int dir_n;

	d = xenbus_directory(t, dir, node, &dir_n);
	if (IS_ERR(d))
		return 0;
	kfree(d);
	return 1;
}
EXPORT_SYMBOL_GPL(xenbus_exists);

/* Get the value of a single file.
 * Returns a kmalloced value: call kfree() on it after use.
 * len indicates length in bytes.
 */
void *xenbus_read(struct xenbus_transaction t,
		  const char *dir, const char *node, unsigned int *len)
{
	char *path;
	void *ret;

	path = join(dir, node);
	if (IS_ERR(path))
		return (void *)path;

	ret = xs_single(t, XS_READ, path, len);
	kfree(path);
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_read);
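
/*
 * Illustrative usage sketch (assumed caller code, not part of this file):
 * a driver reads a node relative to its device path and frees the returned
 * buffer; "dev" is an assumed struct xenbus_device and "backend-id" just an
 * example node.
 *
 *	char *val;
 *
 *	val = xenbus_read(XBT_NIL, dev->otherend, "backend-id", NULL);
 *	if (IS_ERR(val))
 *		return PTR_ERR(val);
 *	pr_info("backend-id is %s\n", val);
 *	kfree(val);
 */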

/* Write the value of a single file.
 * Returns -err on failure.
 */
int xenbus_write(struct xenbus_transaction t,
		 const char *dir, const char *node, const char *string)
{
	const char *path;
	struct kvec iovec[2];
	int ret;

	path = join(dir, node);
	if (IS_ERR(path))
		return PTR_ERR(path);

	iovec[0].iov_base = (void *)path;
	iovec[0].iov_len = strlen(path) + 1;
	iovec[1].iov_base = (void *)string;
	iovec[1].iov_len = strlen(string);

	ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL));
	kfree(path);
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_write);

/* Create a new directory. */
int xenbus_mkdir(struct xenbus_transaction t,
		 const char *dir, const char *node)
{
	char *path;
	int ret;

	path = join(dir, node);
	if (IS_ERR(path))
		return PTR_ERR(path);

	ret = xs_error(xs_single(t, XS_MKDIR, path, NULL));
	kfree(path);
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_mkdir);

/* Destroy a file or directory (directories must be empty). */
int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node)
{
	char *path;
	int ret;

	path = join(dir, node);
	if (IS_ERR(path))
		return PTR_ERR(path);

	ret = xs_error(xs_single(t, XS_RM, path, NULL));
	kfree(path);
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_rm);

/* Start a transaction: changes by others will not be seen during this
 * transaction, and changes will not be visible to others until end.
 */
int xenbus_transaction_start(struct xenbus_transaction *t)
{
	char *id_str;

	id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL);
	if (IS_ERR(id_str))
		return PTR_ERR(id_str);

	t->id = simple_strtoul(id_str, NULL, 0);
	kfree(id_str);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_transaction_start);

/* End a transaction.
 * If abort is true, the transaction is discarded instead of committed.
 */
int xenbus_transaction_end(struct xenbus_transaction t, int abort)
{
	char abortstr[2];

	if (abort)
		strcpy(abortstr, "F");
	else
		strcpy(abortstr, "T");

	return xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL));
}
EXPORT_SYMBOL_GPL(xenbus_transaction_end);
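
/*
 * Illustrative usage sketch (assumed caller code, not part of this file):
 * callers typically group several accesses in one transaction and retry on
 * -EAGAIN; "dev" is an assumed struct xenbus_device and "ring-ref" just an
 * example node.
 *
 *	struct xenbus_transaction xbt;
 *	int err;
 *
 * again:
 *	err = xenbus_transaction_start(&xbt);
 *	if (err)
 *		return err;
 *	err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u", ring_ref);
 *	if (err) {
 *		xenbus_transaction_end(xbt, 1);
 *		return err;
 *	}
 *	err = xenbus_transaction_end(xbt, 0);
 *	if (err == -EAGAIN)
 *		goto again;
 */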

/* Single read and scanf: returns -errno or num scanned. */
int xenbus_scanf(struct xenbus_transaction t,
		 const char *dir, const char *node, const char *fmt, ...)
{
	va_list ap;
	int ret;
	char *val;

	val = xenbus_read(t, dir, node, NULL);
	if (IS_ERR(val))
		return PTR_ERR(val);

	va_start(ap, fmt);
	ret = vsscanf(val, fmt, ap);
	va_end(ap);
	kfree(val);
	/* Distinctive errno. */
	if (ret == 0)
		return -ERANGE;
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_scanf);

/* Read an (optional) unsigned value. */
unsigned int xenbus_read_unsigned(const char *dir, const char *node,
				  unsigned int default_val)
{
	unsigned int val;
	int ret;

	ret = xenbus_scanf(XBT_NIL, dir, node, "%u", &val);
	if (ret <= 0)
		val = default_val;

	return val;
}
EXPORT_SYMBOL_GPL(xenbus_read_unsigned);
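
/*
 * Illustrative usage sketch: reading an optional feature flag with a default
 * of 0 when the node is absent ("feature-foo" is a made-up node name and
 * "dev" an assumed struct xenbus_device).
 *
 *	unsigned int feature;
 *
 *	feature = xenbus_read_unsigned(dev->otherend, "feature-foo", 0);
 */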

/* Single printf and write: returns -errno or 0. */
int xenbus_printf(struct xenbus_transaction t,
		  const char *dir, const char *node, const char *fmt, ...)
{
	va_list ap;
	int ret;
	char *buf;

	va_start(ap, fmt);
	buf = kvasprintf(GFP_NOIO | __GFP_HIGH, fmt, ap);
	va_end(ap);

	if (!buf)
		return -ENOMEM;

	ret = xenbus_write(t, dir, node, buf);

	kfree(buf);

	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_printf);

/* Takes tuples of names, scanf-style args, and void **, NULL terminated. */
int xenbus_gather(struct xenbus_transaction t, const char *dir, ...)
{
	va_list ap;
	const char *name;
	int ret = 0;

	va_start(ap, dir);
	while (ret == 0 && (name = va_arg(ap, char *)) != NULL) {
		const char *fmt = va_arg(ap, char *);
		void *result = va_arg(ap, void *);
		char *p;

		p = xenbus_read(t, dir, name, NULL);
		if (IS_ERR(p)) {
			ret = PTR_ERR(p);
			break;
		}
		if (fmt) {
			if (sscanf(p, fmt, result) == 0)
				ret = -EINVAL;
			kfree(p);
		} else
			*(char **)result = p;
	}
	va_end(ap);
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_gather);
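
/*
 * Illustrative usage sketch (assumed caller code): gathering several values
 * in one call. A NULL format pointer returns the raw string, which the
 * caller must kfree(); the node names below are just examples.
 *
 *	unsigned int ring_ref, evtchn;
 *	char *protocol;
 *	int err;
 *
 *	err = xenbus_gather(XBT_NIL, dev->otherend,
 *			    "ring-ref", "%u", &ring_ref,
 *			    "event-channel", "%u", &evtchn,
 *			    "protocol", NULL, &protocol,
 *			    NULL);
 */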

static int xs_watch(const char *path, const char *token)
{
	struct kvec iov[2];

	iov[0].iov_base = (void *)path;
	iov[0].iov_len = strlen(path) + 1;
	iov[1].iov_base = (void *)token;
	iov[1].iov_len = strlen(token) + 1;

	return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov,
				 ARRAY_SIZE(iov), NULL));
}

static int xs_unwatch(const char *path, const char *token)
{
	struct kvec iov[2];

	iov[0].iov_base = (char *)path;
	iov[0].iov_len = strlen(path) + 1;
	iov[1].iov_base = (char *)token;
	iov[1].iov_len = strlen(token) + 1;

	return xs_error(xs_talkv(XBT_NIL, XS_UNWATCH, iov,
				 ARRAY_SIZE(iov), NULL));
}

static struct xenbus_watch *find_watch(const char *token)
{
	struct xenbus_watch *i, *cmp;

	cmp = (void *)simple_strtoul(token, NULL, 16);

	list_for_each_entry(i, &watches, list)
		if (i == cmp)
			return i;

	return NULL;
}

int xs_watch_msg(struct xs_watch_event *event)
{
	if (count_strings(event->body, event->len) != 2) {
		kfree(event);
		return -EINVAL;
	}
	event->path = (const char *)event->body;
	event->token = (const char *)strchr(event->body, '\0') + 1;

	spin_lock(&watches_lock);
	event->handle = find_watch(event->token);
	if (event->handle != NULL &&
	    (!event->handle->will_handle ||
	     event->handle->will_handle(event->handle,
					event->path, event->token))) {
		spin_lock(&watch_events_lock);
		list_add_tail(&event->list, &watch_events);
		event->handle->nr_pending++;
		wake_up(&watch_events_waitq);
		spin_unlock(&watch_events_lock);
	} else
		kfree(event);
	spin_unlock(&watches_lock);

	return 0;
}

/*
 * Certain older XenBus toolstacks cannot handle reading values that are
 * not populated. Some Xen 3.4 installations are incapable of doing this,
 * so if we are running on anything older than Xen 4 do not attempt to read
 * control/platform-feature-xs_reset_watches.
 */
static bool xen_strict_xenbus_quirk(void)
{
#ifdef CONFIG_X86
	uint32_t eax, ebx, ecx, edx, base;

	base = xen_cpuid_base();
	cpuid(base + 1, &eax, &ebx, &ecx, &edx);

	if ((eax >> 16) < 4)
		return true;
#endif
	return false;

}
static void xs_reset_watches(void)
{
	int err;

	if (!xen_hvm_domain() || xen_initial_domain())
		return;

	if (xen_strict_xenbus_quirk())
		return;

	if (!xenbus_read_unsigned("control",
				  "platform-feature-xs_reset_watches", 0))
		return;

	err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL));
	if (err && err != -EEXIST)
		pr_warn("xs_reset_watches failed: %d\n", err);
}

/* Register callback to watch this node. */
int register_xenbus_watch(struct xenbus_watch *watch)
{
	/* Pointer in ascii is the token. */
	char token[sizeof(watch) * 2 + 1];
	int err;

	sprintf(token, "%lX", (long)watch);

	watch->nr_pending = 0;

	down_read(&xs_watch_rwsem);

	spin_lock(&watches_lock);
	BUG_ON(find_watch(token));
	list_add(&watch->list, &watches);
	spin_unlock(&watches_lock);

	err = xs_watch(watch->node, token);

	if (err) {
		spin_lock(&watches_lock);
		list_del(&watch->list);
		spin_unlock(&watches_lock);
	}

	up_read(&xs_watch_rwsem);

	return err;
}
EXPORT_SYMBOL_GPL(register_xenbus_watch);
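
/*
 * Illustrative usage sketch (assumed caller code): a watch is usually
 * embedded in a driver structure, with ->node and ->callback set before
 * registration; the watched path below is just an example.
 *
 *	static void foo_watch_cb(struct xenbus_watch *watch,
 *				 const char *path, const char *token)
 *	{
 *		pr_info("xenstore node %s changed\n", path);
 *	}
 *
 *	watch->node = "device/vbd/51712/state";
 *	watch->callback = foo_watch_cb;
 *	watch->will_handle = NULL;	// accept all events
 *	err = register_xenbus_watch(watch);
 */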

void unregister_xenbus_watch(struct xenbus_watch *watch)
{
	struct xs_watch_event *event, *tmp;
	char token[sizeof(watch) * 2 + 1];
	int err;

	sprintf(token, "%lX", (long)watch);

	down_read(&xs_watch_rwsem);

	spin_lock(&watches_lock);
	BUG_ON(!find_watch(token));
	list_del(&watch->list);
	spin_unlock(&watches_lock);

	err = xs_unwatch(watch->node, token);
	if (err)
		pr_warn("Failed to release watch %s: %i\n", watch->node, err);

	up_read(&xs_watch_rwsem);

	/* Make sure there are no callbacks running currently (unless
	 * it's us). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (current->pid != xenwatch_pid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) mutex_lock(&xenwatch_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) /* Cancel pending watch events. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) spin_lock(&watch_events_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) if (watch->nr_pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) list_for_each_entry_safe(event, tmp, &watch_events, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) if (event->handle != watch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) list_del(&event->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) kfree(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) watch->nr_pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) spin_unlock(&watch_events_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (current->pid != xenwatch_pid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) mutex_unlock(&xenwatch_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) EXPORT_SYMBOL_GPL(unregister_xenbus_watch);
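/*
 * Illustrative sketch of the watch API as exported above: a caller embeds a
 * struct xenbus_watch, fills in ->node and ->callback and pairs
 * register_xenbus_watch() with unregister_xenbus_watch().  The node path and
 * callback name below are hypothetical, chosen only for the example.
 *
 *	static void example_changed(struct xenbus_watch *watch,
 *				    const char *path, const char *token)
 *	{
 *		pr_info("xenstore node %s changed\n", path);
 *	}
 *
 *	static struct xenbus_watch example_watch = {
 *		.node = "device/example/0/state",
 *		.callback = example_changed,
 *	};
 *
 *	if (register_xenbus_watch(&example_watch))
 *		pr_warn("could not set up example watch\n");
 *	...
 *	unregister_xenbus_watch(&example_watch);
 */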
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
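/*
 * Prepare xenstore for domain suspend: xs_suspend_enter() waits for in-flight
 * accesses to drain, then taking xs_watch_rwsem for writing blocks watch
 * (un)registration and holding xs_response_mutex quiesces the response path
 * until xs_resume() or xs_suspend_cancel() releases them.
 */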
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) void xs_suspend(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) xs_suspend_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) down_write(&xs_watch_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) mutex_lock(&xs_response_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
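/*
 * Counterpart of xs_suspend(): re-initialize xenstore communications via
 * xb_init_comms(), drop the suspend state and re-register every active watch
 * with xenstored.  The tokens are derived from the watch pointers, so they
 * remain valid across the suspend/resume cycle.
 */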
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) void xs_resume(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) struct xenbus_watch *watch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) char token[sizeof(watch) * 2 + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) xb_init_comms();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) mutex_unlock(&xs_response_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) xs_suspend_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) /* No need for watches_lock: the xs_watch_rwsem is sufficient. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) list_for_each_entry(watch, &watches, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) sprintf(token, "%lX", (long)watch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) xs_watch(watch->node, token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) up_write(&xs_watch_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
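/* Abort a suspend attempt: undo the locking done in xs_suspend(). */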
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) void xs_suspend_cancel(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) mutex_unlock(&xs_response_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) up_write(&xs_watch_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) xs_suspend_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
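/*
 * xenwatch_thread - deliver watch events to their callbacks
 *
 * Events are dequeued from watch_events one at a time under
 * watch_events_lock, and each callback runs with xenwatch_mutex held, so
 * watch callbacks are serialized with each other and with
 * unregister_xenbus_watch().
 */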
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) static int xenwatch_thread(void *unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) struct xs_watch_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) xenwatch_pid = current->pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) wait_event_interruptible(watch_events_waitq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) !list_empty(&watch_events));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (kthread_should_stop())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) mutex_lock(&xenwatch_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) spin_lock(&watch_events_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) event = list_first_entry_or_null(&watch_events,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) struct xs_watch_event, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) if (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) list_del(&event->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) event->handle->nr_pending--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) spin_unlock(&watch_events_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) event->handle->callback(event->handle, event->path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) event->token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) kfree(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) mutex_unlock(&xenwatch_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * Wake up all threads waiting for a xenstore reply. In case of shutdown all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * pending replies will be marked as "aborted" in order to let the waiters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * return in spite of xenstore possibly no longer being able to reply. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) * avoids blocking shutdown on a thread that is waiting for xenstore but is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) * itself needed for shutdown processing to proceed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) static int xs_reboot_notify(struct notifier_block *nb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) unsigned long code, void *unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) struct xb_req_data *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) mutex_lock(&xb_write_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) list_for_each_entry(req, &xs_reply_list, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) wake_up(&req->wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) list_for_each_entry(req, &xb_write_list, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) wake_up(&req->wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) mutex_unlock(&xb_write_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) static struct notifier_block xs_reboot_nb = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) .notifier_call = xs_reboot_notify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
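/*
 * xs_init - set up the kernel side of the xenstore protocol
 *
 * Registers the reboot notifier, initializes the shared ring via
 * xb_init_comms(), starts the xenwatch thread and clears any stale watches
 * left over from a previous kernel (e.g. after kexec).
 */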
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) int xs_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) struct task_struct *task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) register_reboot_notifier(&xs_reboot_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) /* Initialize the shared memory rings to talk to xenstored */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) err = xb_init_comms();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) task = kthread_run(xenwatch_thread, NULL, "xenwatch");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) if (IS_ERR(task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) return PTR_ERR(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) /* shutdown watches for kexec boot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) xs_reset_watches();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }