/******************************************************************************
 * xenbus_comms.c
 *
 * Low level code to talk to the Xen Store: ring buffer and event channel.
 *
 * Copyright (C) 2005 Rusty Russell, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/wait.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <xen/xenbus.h>
#include <asm/xen/hypervisor.h>
#include <xen/events.h>
#include <xen/page.h>
#include "xenbus.h"

/* A list of replies. Currently only one will ever be outstanding. */
LIST_HEAD(xs_reply_list);

/* A list of write requests. */
LIST_HEAD(xb_write_list);
DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
DEFINE_MUTEX(xb_write_mutex);

/* Protect xenbus reader thread against save/restore. */
DEFINE_MUTEX(xs_response_mutex);

static int xenbus_irq;
static struct task_struct *xenbus_task;

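/* Event channel interrupt handler: just wake the xenbus thread. */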
static irqreturn_t wake_waiting(int irq, void *unused)
{
	wake_up(&xb_waitq);
	return IRQ_HANDLED;
}

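/*
 * The ring indexes are free-running 32-bit counters; with unsigned
 * wrap-around, prod - cons is the number of bytes in flight, which may
 * never exceed the ring size.  Anything else means the other side has
 * handed us corrupt indexes.
 */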
static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod)
{
	return ((prod - cons) <= XENSTORE_RING_SIZE);
}

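/*
 * Return a pointer to the largest contiguous chunk that can be written
 * (get_output_chunk) or read (get_input_chunk), limited both by the
 * wrap-around point of the ring and by how much space/data is available.
 */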
static void *get_output_chunk(XENSTORE_RING_IDX cons,
			      XENSTORE_RING_IDX prod,
			      char *buf, uint32_t *len)
{
	*len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod);
	if ((XENSTORE_RING_SIZE - (prod - cons)) < *len)
		*len = XENSTORE_RING_SIZE - (prod - cons);
	return buf + MASK_XENSTORE_IDX(prod);
}

static const void *get_input_chunk(XENSTORE_RING_IDX cons,
				   XENSTORE_RING_IDX prod,
				   const char *buf, uint32_t *len)
{
	*len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons);
	if ((prod - cons) < *len)
		*len = prod - cons;
	return buf + MASK_XENSTORE_IDX(cons);
}

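/* Is there a request queued locally and room in the ring to send it? */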
static int xb_data_to_write(void)
{
	struct xenstore_domain_interface *intf = xen_store_interface;

	return (intf->req_prod - intf->req_cons) != XENSTORE_RING_SIZE &&
		!list_empty(&xb_write_list);
}

/**
 * xb_write - low level write
 * @data: buffer to send
 * @len: length of buffer
 *
 * Returns number of bytes written or -err.
 */
static int xb_write(const void *data, unsigned int len)
{
	struct xenstore_domain_interface *intf = xen_store_interface;
	XENSTORE_RING_IDX cons, prod;
	unsigned int bytes = 0;

	while (len != 0) {
		void *dst;
		unsigned int avail;

		/* Read indexes, then verify. */
		cons = intf->req_cons;
		prod = intf->req_prod;
		if (!check_indexes(cons, prod)) {
			intf->req_cons = intf->req_prod = 0;
			return -EIO;
		}
		if (!xb_data_to_write())
			return bytes;

		/* Must write data /after/ reading the consumer index. */
		virt_mb();

		dst = get_output_chunk(cons, prod, intf->req, &avail);
		if (avail == 0)
			continue;
		if (avail > len)
			avail = len;

		memcpy(dst, data, avail);
		data += avail;
		len -= avail;
		bytes += avail;

		/* Other side must not see new producer until data is there. */
		virt_wmb();
		intf->req_prod += avail;

		/* Implies mb(): other side will see the updated producer. */
		if (prod <= intf->req_cons)
			notify_remote_via_evtchn(xen_store_evtchn);
	}

	return bytes;
}

static int xb_data_to_read(void)
{
	struct xenstore_domain_interface *intf = xen_store_interface;
	return (intf->rsp_cons != intf->rsp_prod);
}

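/**
 * xb_read - low level read
 * @data: buffer to fill
 * @len: length of buffer
 *
 * Returns number of bytes read or -err.
 */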
static int xb_read(void *data, unsigned int len)
{
	struct xenstore_domain_interface *intf = xen_store_interface;
	XENSTORE_RING_IDX cons, prod;
	unsigned int bytes = 0;

	while (len != 0) {
		unsigned int avail;
		const char *src;

		/* Read indexes, then verify. */
		cons = intf->rsp_cons;
		prod = intf->rsp_prod;
		if (cons == prod)
			return bytes;

		if (!check_indexes(cons, prod)) {
			intf->rsp_cons = intf->rsp_prod = 0;
			return -EIO;
		}

		src = get_input_chunk(cons, prod, intf->rsp, &avail);
		if (avail == 0)
			continue;
		if (avail > len)
			avail = len;

		/* Must read data /after/ reading the producer index. */
		virt_rmb();

		memcpy(data, src, avail);
		data += avail;
		len -= avail;
		bytes += avail;

		/* Other side must not see free space until we've copied out */
		virt_mb();
		intf->rsp_cons += avail;

		/* Implies mb(): other side will see the updated consumer. */
		if (intf->rsp_prod - cons >= XENSTORE_RING_SIZE)
			notify_remote_via_evtchn(xen_store_evtchn);
	}

	return bytes;
}

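/*
 * Read and dispatch one message from the response ring.  State lives in
 * a static struct so that a message only partially read when the ring
 * runs dry is resumed on the next call.  Returns 0 if a message was
 * handled or more data is still needed, or a negative errno on failure.
 */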
static int process_msg(void)
{
	static struct {
		struct xsd_sockmsg msg;
		char *body;
		union {
			void *alloc;
			struct xs_watch_event *watch;
		};
		bool in_msg;
		bool in_hdr;
		unsigned int read;
	} state;
	struct xb_req_data *req;
	int err;
	unsigned int len;

	if (!state.in_msg) {
		state.in_msg = true;
		state.in_hdr = true;
		state.read = 0;

		/*
		 * We must disallow save/restore while reading a message.
		 * A partial read across s/r leaves us out of sync with
		 * xenstored.
		 * xs_response_mutex is locked as long as we are processing one
		 * message. state.in_msg will be true as long as we are holding
		 * the lock here.
		 */
		mutex_lock(&xs_response_mutex);

		if (!xb_data_to_read()) {
			/* We raced with save/restore: pending data 'gone'. */
			mutex_unlock(&xs_response_mutex);
			state.in_msg = false;
			return 0;
		}
	}

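	/* Read the header first; once complete, allocate and fill the body. */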
	if (state.in_hdr) {
		if (state.read != sizeof(state.msg)) {
			err = xb_read((void *)&state.msg + state.read,
				      sizeof(state.msg) - state.read);
			if (err < 0)
				goto out;
			state.read += err;
			if (state.read != sizeof(state.msg))
				return 0;
			if (state.msg.len > XENSTORE_PAYLOAD_MAX) {
				err = -EINVAL;
				goto out;
			}
		}

		len = state.msg.len + 1;
		if (state.msg.type == XS_WATCH_EVENT)
			len += sizeof(*state.watch);

		state.alloc = kmalloc(len, GFP_NOIO | __GFP_HIGH);
		if (!state.alloc)
			return -ENOMEM;

		if (state.msg.type == XS_WATCH_EVENT)
			state.body = state.watch->body;
		else
			state.body = state.alloc;
		state.in_hdr = false;
		state.read = 0;
	}

	err = xb_read(state.body + state.read, state.msg.len - state.read);
	if (err < 0)
		goto out;

	state.read += err;
	if (state.read != state.msg.len)
		return 0;

	state.body[state.msg.len] = '\0';

	if (state.msg.type == XS_WATCH_EVENT) {
		state.watch->len = state.msg.len;
		err = xs_watch_msg(state.watch);
	} else {
		err = -ENOENT;
		mutex_lock(&xb_write_mutex);
		list_for_each_entry(req, &xs_reply_list, list) {
			if (req->msg.req_id == state.msg.req_id) {
				list_del(&req->list);
				err = 0;
				break;
			}
		}
		mutex_unlock(&xb_write_mutex);
		if (err)
			goto out;

		if (req->state == xb_req_state_wait_reply) {
			req->msg.req_id = req->caller_req_id;
			req->msg.type = state.msg.type;
			req->msg.len = state.msg.len;
			req->body = state.body;
			/* write body, then update state */
			virt_wmb();
			req->state = xb_req_state_got_reply;
			req->cb(req);
		} else
			kfree(req);
	}

	mutex_unlock(&xs_response_mutex);

	state.in_msg = false;
	state.alloc = NULL;
	return err;

out:
	mutex_unlock(&xs_response_mutex);
	state.in_msg = false;
	kfree(state.alloc);
	state.alloc = NULL;
	return err;
}

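/*
 * Push the request at the head of xb_write_list into the request ring.
 * State is static so a request that only partially fits is resumed on
 * the next call.  On error the request is failed with XS_ERROR and the
 * waiter (if any) is woken.
 */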
static int process_writes(void)
{
	static struct {
		struct xb_req_data *req;
		int idx;
		unsigned int written;
	} state;
	void *base;
	unsigned int len;
	int err = 0;

	if (!xb_data_to_write())
		return 0;

	mutex_lock(&xb_write_mutex);

	if (!state.req) {
		state.req = list_first_entry(&xb_write_list,
					     struct xb_req_data, list);
		state.idx = -1;
		state.written = 0;
	}

	if (state.req->state == xb_req_state_aborted)
		goto out_err;

	while (state.idx < state.req->num_vecs) {
		if (state.idx < 0) {
			base = &state.req->msg;
			len = sizeof(state.req->msg);
		} else {
			base = state.req->vec[state.idx].iov_base;
			len = state.req->vec[state.idx].iov_len;
		}
		err = xb_write(base + state.written, len - state.written);
		if (err < 0)
			goto out_err;
		state.written += err;
		if (state.written != len)
			goto out;

		state.idx++;
		state.written = 0;
	}

	list_del(&state.req->list);
	state.req->state = xb_req_state_wait_reply;
	list_add_tail(&state.req->list, &xs_reply_list);
	state.req = NULL;

out:
	mutex_unlock(&xb_write_mutex);

	return 0;

out_err:
	state.req->msg.type = XS_ERROR;
	state.req->err = err;
	list_del(&state.req->list);
	if (state.req->state == xb_req_state_aborted)
		kfree(state.req);
	else {
		/* write err, then update state */
		virt_wmb();
		state.req->state = xb_req_state_got_reply;
		wake_up(&state.req->wq);
	}

	mutex_unlock(&xb_write_mutex);

	state.req = NULL;

	return err;
}

static int xb_thread_work(void)
{
	return xb_data_to_read() || xb_data_to_write();
}

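/*
 * Main loop of the xenbus kthread: sleep until there is either response
 * data to read or a queued request to write, then handle both.  -ENOMEM
 * from process_msg() is retried after a schedule().
 */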
static int xenbus_thread(void *unused)
{
	int err;

	while (!kthread_should_stop()) {
		if (wait_event_interruptible(xb_waitq, xb_thread_work()))
			continue;

		err = process_msg();
		if (err == -ENOMEM)
			schedule();
		else if (err)
			pr_warn_ratelimited("error %d while reading message\n",
					    err);

		err = process_writes();
		if (err)
			pr_warn_ratelimited("error %d while writing message\n",
					    err);
	}

	xenbus_task = NULL;
	return 0;
}

/**
 * xb_init_comms - Set up interrupt handler for the store event channel.
 */
int xb_init_comms(void)
{
	struct xenstore_domain_interface *intf = xen_store_interface;

	if (intf->req_prod != intf->req_cons)
		pr_err("request ring is not quiescent (%08x:%08x)!\n",
		       intf->req_cons, intf->req_prod);

	if (intf->rsp_prod != intf->rsp_cons) {
		pr_warn("response ring is not quiescent (%08x:%08x): fixing up\n",
			intf->rsp_cons, intf->rsp_prod);
		/* breaks kdump */
		if (!reset_devices)
			intf->rsp_cons = intf->rsp_prod;
	}

	if (xenbus_irq) {
		/* Already have an irq; assume we're resuming */
		rebind_evtchn_irq(xen_store_evtchn, xenbus_irq);
	} else {
		int err;

		err = bind_evtchn_to_irqhandler(xen_store_evtchn, wake_waiting,
						0, "xenbus", &xb_waitq);
		if (err < 0) {
			pr_err("request irq failed %i\n", err);
			return err;
		}

		xenbus_irq = err;

		if (!xenbus_task) {
			xenbus_task = kthread_run(xenbus_thread, NULL,
						  "xenbus");
			if (IS_ERR(xenbus_task))
				return PTR_ERR(xenbus_task);
		}
	}

	return 0;
}

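/* Undo xb_init_comms(): unbind the store event channel interrupt. */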
void xb_deinit_comms(void)
{
	unbind_from_irqhandler(xenbus_irq, &xb_waitq);
	xenbus_irq = 0;
}