// SPDX-License-Identifier: GPL-2.0-or-later
/* Xenbus code for blkif backend
   Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
   Copyright (C) 2005 XenSource Ltd
*/

#define pr_fmt(fmt) "xen-blkback: " fmt

#include <stdarg.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include "common.h"

/* On the XenBus the max length of 'ring-ref%u'. */
#define RINGREF_NAME_LEN (20)

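/*
 * Per-device backend state, allocated in xen_blkbk_probe() and stored as
 * the xenbus device's driver data.
 */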
struct backend_info {
	struct xenbus_device *dev;
	struct xen_blkif *blkif;
	struct xenbus_watch backend_watch;
	unsigned major;
	unsigned minor;
	char *mode;
};

static struct kmem_cache *xen_blkif_cachep;
static void connect(struct backend_info *);
static int connect_ring(struct backend_info *);
static void backend_changed(struct xenbus_watch *, const char *,
			    const char *);
static void xen_blkif_free(struct xen_blkif *blkif);
static void xen_vbd_free(struct xen_vbd *vbd);

struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be)
{
	return be->dev;
}

/*
 * The last request could free the device from softirq context and
 * xen_blkif_free() can sleep.
 */
static void xen_blkif_deferred_free(struct work_struct *work)
{
	struct xen_blkif *blkif;

	blkif = container_of(work, struct xen_blkif, free_work);
	xen_blkif_free(blkif);
}

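/*
 * Build the name used for the xenblkd kernel threads: "<domid>.<devname>",
 * where devname is taken from this device's xenstore "dev" node. buf must
 * be at least TASK_COMM_LEN bytes.
 */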
static int blkback_name(struct xen_blkif *blkif, char *buf)
{
	char *devpath, *devname;
	struct xenbus_device *dev = blkif->be->dev;

	devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
	if (IS_ERR(devpath))
		return PTR_ERR(devpath);

	devname = strstr(devpath, "/dev/");
	if (devname != NULL)
		devname += strlen("/dev/");
	else
		devname = devpath;

	snprintf(buf, TASK_COMM_LEN, "%d.%s", blkif->domid, devname);
	kfree(devpath);

	return 0;
}

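/*
 * Bring the interface state up to date: connect if both ends are ready,
 * flush and invalidate the backing device's page cache, and spawn one
 * xenblkd thread per ring. Bails out early if we are not yet ready or
 * already connected, so it is safe to call repeatedly.
 */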
static void xen_update_blkif_status(struct xen_blkif *blkif)
{
	int err;
	char name[TASK_COMM_LEN];
	struct xen_blkif_ring *ring;
	int i;

	/* Not ready to connect? */
	if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev)
		return;

	/* Already connected? */
	if (blkif->be->dev->state == XenbusStateConnected)
		return;

	/* Attempt to connect: exit if we fail to. */
	connect(blkif->be);
	if (blkif->be->dev->state != XenbusStateConnected)
		return;

	err = blkback_name(blkif, name);
	if (err) {
		xenbus_dev_error(blkif->be->dev, err, "get blkback dev name");
		return;
	}

	err = filemap_write_and_wait(blkif->vbd.bdev->bd_inode->i_mapping);
	if (err) {
		xenbus_dev_error(blkif->be->dev, err, "block flush");
		return;
	}
	invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping);

	for (i = 0; i < blkif->nr_rings; i++) {
		ring = &blkif->rings[i];
		ring->xenblkd = kthread_run(xen_blkif_schedule, ring, "%s-%d", name, i);
		if (IS_ERR(ring->xenblkd)) {
			err = PTR_ERR(ring->xenblkd);
			ring->xenblkd = NULL;
			xenbus_dev_fatal(blkif->be->dev, err,
					 "start %s-%d xenblkd", name, i);
			goto out;
		}
	}
	return;

out:
	while (--i >= 0) {
		ring = &blkif->rings[i];
		kthread_stop(ring->xenblkd);
	}
	return;
}

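/* Allocate blkif->nr_rings rings and initialise the per-ring state. */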
static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
{
	unsigned int r;

	blkif->rings = kcalloc(blkif->nr_rings, sizeof(struct xen_blkif_ring),
			       GFP_KERNEL);
	if (!blkif->rings)
		return -ENOMEM;

	for (r = 0; r < blkif->nr_rings; r++) {
		struct xen_blkif_ring *ring = &blkif->rings[r];

		spin_lock_init(&ring->blk_ring_lock);
		init_waitqueue_head(&ring->wq);
		INIT_LIST_HEAD(&ring->pending_free);
		INIT_LIST_HEAD(&ring->persistent_purge_list);
		INIT_WORK(&ring->persistent_purge_work, xen_blkbk_unmap_purged_grants);
		gnttab_page_cache_init(&ring->free_pages);

		spin_lock_init(&ring->pending_free_lock);
		init_waitqueue_head(&ring->pending_free_wq);
		init_waitqueue_head(&ring->shutdown_wq);
		ring->blkif = blkif;
		ring->st_print = jiffies;
		ring->active = true;
	}

	return 0;
}

static struct xen_blkif *xen_blkif_alloc(domid_t domid)
{
	struct xen_blkif *blkif;

	BUILD_BUG_ON(MAX_INDIRECT_PAGES > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

	blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL);
	if (!blkif)
		return ERR_PTR(-ENOMEM);

	blkif->domid = domid;
	atomic_set(&blkif->refcnt, 1);
	init_completion(&blkif->drain_complete);

	/*
	 * Because freeing back to the cache may be deferred, it is not
	 * safe to unload the module (and hence destroy the cache) until
	 * this has completed. To prevent premature unloading, take an
	 * extra module reference here and release only when the object
	 * has been freed back to the cache.
	 */
	__module_get(THIS_MODULE);
	INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);

	return blkif;
}

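/*
 * Map the frontend's shared ring pages (nr_grefs grant references) into
 * our address space, attach the protocol-specific back ring view, and
 * bind the event channel. The lateeoi irq variant is used so that a
 * misbehaving frontend cannot flood us with events.
 */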
static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
			 unsigned int nr_grefs, unsigned int evtchn)
{
	int err;
	struct xen_blkif *blkif = ring->blkif;
	const struct blkif_common_sring *sring_common;
	RING_IDX rsp_prod, req_prod;
	unsigned int size;

	/* Already connected? */
	if (ring->irq)
		return 0;

	err = xenbus_map_ring_valloc(blkif->be->dev, gref, nr_grefs,
				     &ring->blk_ring);
	if (err < 0)
		return err;

	sring_common = (struct blkif_common_sring *)ring->blk_ring;
	rsp_prod = READ_ONCE(sring_common->rsp_prod);
	req_prod = READ_ONCE(sring_common->req_prod);

	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
	{
		struct blkif_sring *sring_native =
			(struct blkif_sring *)ring->blk_ring;

		BACK_RING_ATTACH(&ring->blk_rings.native, sring_native,
				 rsp_prod, XEN_PAGE_SIZE * nr_grefs);
		size = __RING_SIZE(sring_native, XEN_PAGE_SIZE * nr_grefs);
		break;
	}
	case BLKIF_PROTOCOL_X86_32:
	{
		struct blkif_x86_32_sring *sring_x86_32 =
			(struct blkif_x86_32_sring *)ring->blk_ring;

		BACK_RING_ATTACH(&ring->blk_rings.x86_32, sring_x86_32,
				 rsp_prod, XEN_PAGE_SIZE * nr_grefs);
		size = __RING_SIZE(sring_x86_32, XEN_PAGE_SIZE * nr_grefs);
		break;
	}
	case BLKIF_PROTOCOL_X86_64:
	{
		struct blkif_x86_64_sring *sring_x86_64 =
			(struct blkif_x86_64_sring *)ring->blk_ring;

		BACK_RING_ATTACH(&ring->blk_rings.x86_64, sring_x86_64,
				 rsp_prod, XEN_PAGE_SIZE * nr_grefs);
		size = __RING_SIZE(sring_x86_64, XEN_PAGE_SIZE * nr_grefs);
		break;
	}
	default:
		BUG();
	}

	err = -EIO;
	if (req_prod - rsp_prod > size)
		goto fail;

	err = bind_interdomain_evtchn_to_irqhandler_lateeoi(blkif->domid,
			evtchn, xen_blkif_be_int, 0, "blkif-backend", ring);
	if (err < 0)
		goto fail;
	ring->irq = err;

	return 0;

fail:
	xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
	ring->blk_rings.common.sring = NULL;
	return err;
}

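/*
 * Tear the rings down: stop the xenblkd threads, unbind the event
 * channels, unmap the shared rings and free per-ring resources. Returns
 * -EBUSY (leaving blkif->rings allocated) while any ring still has I/O
 * in flight; the function is idempotent, so callers can retry later.
 */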
static int xen_blkif_disconnect(struct xen_blkif *blkif)
{
	struct pending_req *req, *n;
	unsigned int j, r;
	bool busy = false;

	for (r = 0; r < blkif->nr_rings; r++) {
		struct xen_blkif_ring *ring = &blkif->rings[r];
		unsigned int i = 0;

		if (!ring->active)
			continue;

		if (ring->xenblkd) {
			kthread_stop(ring->xenblkd);
			ring->xenblkd = NULL;
			wake_up(&ring->shutdown_wq);
		}

		/* The above kthread_stop() guarantees that at this point we
		 * don't have any discard_io or other_io requests. So, checking
		 * for inflight IO is enough.
		 */
		if (atomic_read(&ring->inflight) > 0) {
			busy = true;
			continue;
		}

		if (ring->irq) {
			unbind_from_irqhandler(ring->irq, ring);
			ring->irq = 0;
		}

		if (ring->blk_rings.common.sring) {
			xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
			ring->blk_rings.common.sring = NULL;
		}

		/* Remove all persistent grants and the cache of ballooned pages. */
		xen_blkbk_free_caches(ring);

		/* Free all pending requests, counting them to check none were in use. */
		list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
			list_del(&req->free_list);

			for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
				kfree(req->segments[j]);

			for (j = 0; j < MAX_INDIRECT_PAGES; j++)
				kfree(req->indirect_pages[j]);

			kfree(req);
			i++;
		}

		BUG_ON(atomic_read(&ring->persistent_gnt_in_use) != 0);
		BUG_ON(!list_empty(&ring->persistent_purge_list));
		BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
		BUG_ON(ring->free_pages.num_pages != 0);
		BUG_ON(ring->persistent_gnt_c != 0);
		WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
		ring->active = false;
	}
	if (busy)
		return -EBUSY;

	blkif->nr_ring_pages = 0;
	/*
	 * blkif->rings was allocated in connect_ring, so free it here.
	 */
	kfree(blkif->rings);
	blkif->rings = NULL;
	blkif->nr_rings = 0;

	return 0;
}

static void xen_blkif_free(struct xen_blkif *blkif)
{
	WARN_ON(xen_blkif_disconnect(blkif));
	xen_vbd_free(&blkif->vbd);
	kfree(blkif->be->mode);
	kfree(blkif->be);

	/* Make sure everything is drained before shutting down */
	kmem_cache_free(xen_blkif_cachep, blkif);
	module_put(THIS_MODULE);
}

int __init xen_blkif_interface_init(void)
{
	xen_blkif_cachep = kmem_cache_create("blkif_cache",
					     sizeof(struct xen_blkif),
					     0, 0, NULL);
	if (!xen_blkif_cachep)
		return -ENOMEM;

	return 0;
}

void xen_blkif_interface_fini(void)
{
	kmem_cache_destroy(xen_blkif_cachep);
	xen_blkif_cachep = NULL;
}

/*
 * sysfs interface for VBD I/O requests
 */

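/*
 * Each show_<name> handler below sums the per-ring st_<name> counter over
 * all rings. The attributes live in the device's "statistics" group, so
 * (assuming the usual xen-backend device naming) they show up as e.g.
 * /sys/bus/xen-backend/devices/vbd-<domid>-<handle>/statistics/rd_req.
 */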
#define VBD_SHOW_ALLRING(name, format)					\
	static ssize_t show_##name(struct device *_dev,			\
				   struct device_attribute *attr,	\
				   char *buf)				\
	{								\
		struct xenbus_device *dev = to_xenbus_device(_dev);	\
		struct backend_info *be = dev_get_drvdata(&dev->dev);	\
		struct xen_blkif *blkif = be->blkif;			\
		unsigned int i;						\
		unsigned long long result = 0;				\
									\
		if (!blkif->rings)					\
			goto out;					\
									\
		for (i = 0; i < blkif->nr_rings; i++) {			\
			struct xen_blkif_ring *ring = &blkif->rings[i];	\
									\
			result += ring->st_##name;			\
		}							\
									\
out:									\
		return sprintf(buf, format, result);			\
	}								\
	static DEVICE_ATTR(name, 0444, show_##name, NULL)

VBD_SHOW_ALLRING(oo_req,  "%llu\n");
VBD_SHOW_ALLRING(rd_req,  "%llu\n");
VBD_SHOW_ALLRING(wr_req,  "%llu\n");
VBD_SHOW_ALLRING(f_req,   "%llu\n");
VBD_SHOW_ALLRING(ds_req,  "%llu\n");
VBD_SHOW_ALLRING(rd_sect, "%llu\n");
VBD_SHOW_ALLRING(wr_sect, "%llu\n");

static struct attribute *xen_vbdstat_attrs[] = {
	&dev_attr_oo_req.attr,
	&dev_attr_rd_req.attr,
	&dev_attr_wr_req.attr,
	&dev_attr_f_req.attr,
	&dev_attr_ds_req.attr,
	&dev_attr_rd_sect.attr,
	&dev_attr_wr_sect.attr,
	NULL
};

static const struct attribute_group xen_vbdstat_group = {
	.name = "statistics",
	.attrs = xen_vbdstat_attrs,
};

#define VBD_SHOW(name, format, args...)					\
	static ssize_t show_##name(struct device *_dev,			\
				   struct device_attribute *attr,	\
				   char *buf)				\
	{								\
		struct xenbus_device *dev = to_xenbus_device(_dev);	\
		struct backend_info *be = dev_get_drvdata(&dev->dev);	\
									\
		return sprintf(buf, format, ##args);			\
	}								\
	static DEVICE_ATTR(name, 0444, show_##name, NULL)

VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor);
VBD_SHOW(mode, "%s\n", be->mode);

static int xenvbd_sysfs_addif(struct xenbus_device *dev)
{
	int error;

	error = device_create_file(&dev->dev, &dev_attr_physical_device);
	if (error)
		goto fail1;

	error = device_create_file(&dev->dev, &dev_attr_mode);
	if (error)
		goto fail2;

	error = sysfs_create_group(&dev->dev.kobj, &xen_vbdstat_group);
	if (error)
		goto fail3;

	return 0;

fail3:	sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
fail2:	device_remove_file(&dev->dev, &dev_attr_mode);
fail1:	device_remove_file(&dev->dev, &dev_attr_physical_device);
	return error;
}

static void xenvbd_sysfs_delif(struct xenbus_device *dev)
{
	sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
	device_remove_file(&dev->dev, &dev_attr_mode);
	device_remove_file(&dev->dev, &dev_attr_physical_device);
}

static void xen_vbd_free(struct xen_vbd *vbd)
{
	if (vbd->bdev)
		blkdev_put(vbd->bdev, vbd->readonly ? FMODE_READ : FMODE_WRITE);
	vbd->bdev = NULL;
}

/* Enable the persistent grants feature. */
static bool feature_persistent = true;
module_param(feature_persistent, bool, 0644);
MODULE_PARM_DESC(feature_persistent,
		 "Enables the persistent grants feature");

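/*
 * Open the physical device (major:minor) backing this vbd and record its
 * size, CDROM/removable flags, and whether the queue supports write-cache
 * flushes and secure erase.
 */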
static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
			  unsigned major, unsigned minor, int readonly,
			  int cdrom)
{
	struct xen_vbd *vbd;
	struct block_device *bdev;
	struct request_queue *q;

	vbd = &blkif->vbd;
	vbd->handle = handle;
	vbd->readonly = readonly;
	vbd->type = 0;

	vbd->pdevice = MKDEV(major, minor);

	bdev = blkdev_get_by_dev(vbd->pdevice, vbd->readonly ?
				 FMODE_READ : FMODE_WRITE, NULL);

	if (IS_ERR(bdev)) {
		pr_warn("xen_vbd_create: device %08x could not be opened\n",
			vbd->pdevice);
		return -ENOENT;
	}

	vbd->bdev = bdev;
	if (vbd->bdev->bd_disk == NULL) {
		pr_warn("xen_vbd_create: device %08x doesn't exist\n",
			vbd->pdevice);
		xen_vbd_free(vbd);
		return -ENOENT;
	}
	vbd->size = vbd_sz(vbd);

	if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
		vbd->type |= VDISK_CDROM;
	if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
		vbd->type |= VDISK_REMOVABLE;

	q = bdev_get_queue(bdev);
	if (q && test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		vbd->flush_support = true;

	if (q && blk_queue_secure_erase(q))
		vbd->discard_secure = true;

	vbd->feature_gnt_persistent = feature_persistent;

	pr_debug("Successful creation of handle=%04x (dom=%u)\n",
		 handle, blkif->domid);
	return 0;
}

static int xen_blkbk_remove(struct xenbus_device *dev)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	if (be->major || be->minor)
		xenvbd_sysfs_delif(dev);

	if (be->backend_watch.node) {
		unregister_xenbus_watch(&be->backend_watch);
		kfree(be->backend_watch.node);
		be->backend_watch.node = NULL;
	}

	dev_set_drvdata(&dev->dev, NULL);

	if (be->blkif) {
		xen_blkif_disconnect(be->blkif);

		/* Put the reference we set in xen_blkif_alloc(). */
		xen_blkif_put(be->blkif);
	}

	return 0;
}

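/* Advertise "feature-flush-cache" to the frontend via xenstore. */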
int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
			      struct backend_info *be, int state)
{
	struct xenbus_device *dev = be->dev;
	int err;

	err = xenbus_printf(xbt, dev->nodename, "feature-flush-cache",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-flush-cache (%d)", err);

	return err;
}

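/*
 * Advertise discard support to the frontend: "feature-discard" plus the
 * granularity, alignment and optional "discard-secure" nodes, unless the
 * toolstack disabled it via "discard-enable".
 */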
static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	struct xen_blkif *blkif = be->blkif;
	int err;
	int state = 0;
	struct block_device *bdev = be->blkif->vbd.bdev;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!xenbus_read_unsigned(dev->nodename, "discard-enable", 1))
		return;

	if (blk_queue_discard(q)) {
		err = xenbus_printf(xbt, dev->nodename,
				    "discard-granularity", "%u",
				    q->limits.discard_granularity);
		if (err) {
			dev_warn(&dev->dev, "writing discard-granularity (%d)", err);
			return;
		}
		err = xenbus_printf(xbt, dev->nodename,
				    "discard-alignment", "%u",
				    q->limits.discard_alignment);
		if (err) {
			dev_warn(&dev->dev, "writing discard-alignment (%d)", err);
			return;
		}
		state = 1;
		/* Optional. */
		err = xenbus_printf(xbt, dev->nodename,
				    "discard-secure", "%d",
				    blkif->vbd.discard_secure);
		if (err) {
			dev_warn(&dev->dev, "writing discard-secure (%d)", err);
			return;
		}
	}
	err = xenbus_printf(xbt, dev->nodename, "feature-discard",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-discard (%d)", err);
}

int xen_blkbk_barrier(struct xenbus_transaction xbt,
		      struct backend_info *be, int state)
{
	struct xenbus_device *dev = be->dev;
	int err;

	err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-barrier (%d)", err);

	return err;
}

/*
 * Entry point to this code when a new device is created. Allocate the basic
 * structures, and watch the store waiting for the hotplug scripts to tell us
 * the device's physical major and minor numbers. Switch to InitWait.
 */
static int xen_blkbk_probe(struct xenbus_device *dev,
			   const struct xenbus_device_id *id)
{
	int err;
	struct backend_info *be = kzalloc(sizeof(struct backend_info),
					  GFP_KERNEL);

	/* match the pr_debug in xen_blkbk_remove */
	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	if (!be) {
		xenbus_dev_fatal(dev, -ENOMEM,
				 "allocating backend structure");
		return -ENOMEM;
	}
	be->dev = dev;
	dev_set_drvdata(&dev->dev, be);

	be->blkif = xen_blkif_alloc(dev->otherend_id);
	if (IS_ERR(be->blkif)) {
		err = PTR_ERR(be->blkif);
		be->blkif = NULL;
		xenbus_dev_fatal(dev, err, "creating block interface");
		goto fail;
	}

	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "feature-max-indirect-segments", "%u",
			    MAX_INDIRECT_SEGMENTS);
	if (err)
		dev_warn(&dev->dev,
			 "writing %s/feature-max-indirect-segments (%d)",
			 dev->nodename, err);

	/* Multi-queue: advertise how many queues we support. */
	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "multi-queue-max-queues", "%u", xenblk_max_queues);
	if (err)
		pr_warn("Error writing multi-queue-max-queues\n");

	/* Set up the back pointer. */
	be->blkif->be = be;

	err = xenbus_watch_pathfmt(dev, &be->backend_watch, NULL,
				   backend_changed,
				   "%s/%s", dev->nodename, "physical-device");
	if (err)
		goto fail;

	err = xenbus_printf(XBT_NIL, dev->nodename, "max-ring-page-order", "%u",
			    xen_blkif_max_ring_order);
	if (err)
		pr_warn("%s write out 'max-ring-page-order' failed\n", __func__);

	err = xenbus_switch_state(dev, XenbusStateInitWait);
	if (err)
		goto fail;

	return 0;

fail:
	pr_warn("%s failed\n", __func__);
	xen_blkbk_remove(dev);
	return err;
}

/*
 * Callback received when the hotplug scripts have placed the physical-device
 * node. Read it and the mode node, and create a vbd. If the frontend is
 * ready, connect.
 */
static void backend_changed(struct xenbus_watch *watch,
			    const char *path, const char *token)
{
	int err;
	unsigned major;
	unsigned minor;
	struct backend_info *be
		= container_of(watch, struct backend_info, backend_watch);
	struct xenbus_device *dev = be->dev;
	int cdrom = 0;
	unsigned long handle;
	char *device_type;

	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
			   &major, &minor);
	if (XENBUS_EXIST_ERR(err)) {
		/*
		 * Since this watch will fire once immediately after it is
		 * registered, we expect this. Ignore it, and wait for the
		 * hotplug scripts.
		 */
		return;
	}
	if (err != 2) {
		xenbus_dev_fatal(dev, err, "reading physical-device");
		return;
	}

	if (be->major | be->minor) {
		if (be->major != major || be->minor != minor)
			pr_warn("changing physical device (from %x:%x to %x:%x) not supported.\n",
				be->major, be->minor, major, minor);
		return;
	}

	be->mode = xenbus_read(XBT_NIL, dev->nodename, "mode", NULL);
	if (IS_ERR(be->mode)) {
		err = PTR_ERR(be->mode);
		be->mode = NULL;
		xenbus_dev_fatal(dev, err, "reading mode");
		return;
	}

	device_type = xenbus_read(XBT_NIL, dev->otherend, "device-type", NULL);
	if (!IS_ERR(device_type)) {
		cdrom = strcmp(device_type, "cdrom") == 0;
		kfree(device_type);
	}

	/* The frontend directory name is a number, which is used as the handle. */
	err = kstrtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
	if (err) {
		kfree(be->mode);
		be->mode = NULL;
		return;
	}

	be->major = major;
	be->minor = minor;

	err = xen_vbd_create(be->blkif, handle, major, minor,
			     !strchr(be->mode, 'w'), cdrom);

	if (err)
		xenbus_dev_fatal(dev, err, "creating vbd structure");
	else {
		err = xenvbd_sysfs_addif(dev);
		if (err) {
			xen_vbd_free(&be->blkif->vbd);
			xenbus_dev_fatal(dev, err, "creating sysfs entries");
		}
	}

	if (err) {
		kfree(be->mode);
		be->mode = NULL;
		be->major = 0;
		be->minor = 0;
	} else {
		/* We're potentially connected now */
		xen_update_blkif_status(be->blkif);
	}
}

/*
 * Callback received when the frontend's state changes.
 */
static void frontend_changed(struct xenbus_device *dev,
			     enum xenbus_state frontend_state)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);
	int err;

	pr_debug("%s %p %s\n", __func__, dev, xenbus_strstate(frontend_state));

	switch (frontend_state) {
	case XenbusStateInitialising:
		if (dev->state == XenbusStateClosed) {
			pr_info("%s: prepare for reconnect\n", dev->nodename);
			xenbus_switch_state(dev, XenbusStateInitWait);
		}
		break;

	case XenbusStateInitialised:
	case XenbusStateConnected:
		/*
		 * Ensure we connect even when two watches fire in
		 * close succession and we miss the intermediate value
		 * of frontend_state.
		 */
		if (dev->state == XenbusStateConnected)
			break;

		/*
		 * Enforce precondition before potential leak point.
		 * xen_blkif_disconnect() is idempotent.
		 */
		err = xen_blkif_disconnect(be->blkif);
		if (err) {
			xenbus_dev_fatal(dev, err, "pending I/O");
			break;
		}

		err = connect_ring(be);
		if (err) {
			/*
			 * Clean up so that memory resources can be used by
			 * other devices. connect_ring() already reported the
			 * error.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) xen_blkif_disconnect(be->blkif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) xen_update_blkif_status(be->blkif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) case XenbusStateClosing:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) xenbus_switch_state(dev, XenbusStateClosing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) case XenbusStateClosed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) xen_blkif_disconnect(be->blkif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) xenbus_switch_state(dev, XenbusStateClosed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (xenbus_dev_is_online(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) /* if not online */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) case XenbusStateUnknown:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) /* implies xen_blkif_disconnect() via xen_blkbk_remove() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) device_unregister(&dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) frontend_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
/* Once memory pressure is detected, squeeze free page pools for a while. */
static unsigned int buffer_squeeze_duration_ms = 10;
module_param_named(buffer_squeeze_duration_ms,
		buffer_squeeze_duration_ms, uint, 0644);
MODULE_PARM_DESC(buffer_squeeze_duration_ms,
"Duration in ms to squeeze the page buffer when memory pressure is detected");
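
/*
 * Usage sketch (assuming the module is loaded as xen-blkback): with 0644
 * permissions the knob can also be tuned at runtime through sysfs, e.g.:
 *   echo 100 > /sys/module/xen_blkback/parameters/buffer_squeeze_duration_ms
 */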
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
/*
 * Callback invoked when memory pressure is detected.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) static void reclaim_memory(struct xenbus_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) struct backend_info *be = dev_get_drvdata(&dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (!be)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) return;
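	/*
	 * Push the squeeze deadline out from now; the I/O path keeps its
	 * free page pool minimal until this deadline passes.
	 */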
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) be->blkif->buffer_squeeze_end = jiffies +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) msecs_to_jiffies(buffer_squeeze_duration_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) /* ** Connection ** */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) * Write the physical details regarding the block device to the store, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) * switch to Connected state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) static void connect(struct backend_info *be)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) struct xenbus_transaction xbt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) struct xenbus_device *dev = be->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) pr_debug("%s %s\n", __func__, dev->otherend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) /* Supply the information about the device the frontend needs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) err = xenbus_transaction_start(&xbt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) xenbus_dev_fatal(dev, err, "starting transaction");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
	/* If we can't advertise it, that's OK. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) xen_blkbk_discard(xbt, be);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) be->blkif->vbd.feature_gnt_persistent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) xenbus_dev_fatal(dev, err, "writing %s/feature-persistent",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) dev->nodename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) (unsigned long long)vbd_sz(&be->blkif->vbd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) xenbus_dev_fatal(dev, err, "writing %s/sectors",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) dev->nodename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) /* FIXME: use a typename instead */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) err = xenbus_printf(xbt, dev->nodename, "info", "%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) be->blkif->vbd.type |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) (be->blkif->vbd.readonly ? VDISK_READONLY : 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) xenbus_dev_fatal(dev, err, "writing %s/info",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) dev->nodename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) (unsigned long)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) bdev_logical_block_size(be->blkif->vbd.bdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) xenbus_dev_fatal(dev, err, "writing %s/sector-size",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) dev->nodename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) err = xenbus_printf(xbt, dev->nodename, "physical-sector-size", "%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) bdev_physical_block_size(be->blkif->vbd.bdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) xenbus_dev_error(dev, err, "writing %s/physical-sector-size",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) dev->nodename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
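	/* -EAGAIN means the transaction raced with another update; retry. */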
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) err = xenbus_transaction_end(xbt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (err == -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) xenbus_dev_fatal(dev, err, "ending transaction");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) err = xenbus_switch_state(dev, XenbusStateConnected);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) xenbus_dev_fatal(dev, err, "%s: switching to Connected state",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) dev->nodename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) abort:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) xenbus_transaction_end(xbt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
/*
 * Each ring may span multiple pages, depending on "ring-page-order".
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) struct pending_req *req, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) int err, i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) struct xen_blkif *blkif = ring->blkif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) struct xenbus_device *dev = blkif->be->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) unsigned int nr_grefs, evtchn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
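	/* The event channel is mandatory; fail if the frontend didn't publish it. */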
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) &evtchn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (err != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) xenbus_dev_fatal(dev, err, "reading %s/event-channel", dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) nr_grefs = blkif->nr_ring_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (unlikely(!nr_grefs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) WARN_ON(true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) for (i = 0; i < nr_grefs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) char ring_ref_name[RINGREF_NAME_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
		if (blkif->multi_ref) {
			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
		} else {
			WARN_ON(i != 0);
			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref");
		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) "%u", &ring_ref[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) if (err != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) xenbus_dev_fatal(dev, err, "reading %s/%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) dir, ring_ref_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
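	/*
	 * Preallocate the pending request pool: XEN_BLKIF_REQS_PER_PAGE
	 * requests per ring page, each carrying its own indirect segment
	 * and indirect page buffers.
	 */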
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) req = kzalloc(sizeof(*req), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if (!req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) list_add_tail(&req->free_list, &ring->pending_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) req->segments[j] = kzalloc(sizeof(*req->segments[0]), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (!req->segments[j])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) req->indirect_pages[j] = kzalloc(sizeof(*req->indirect_pages[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) if (!req->indirect_pages[j])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
	/* Map the shared ring frames and bind the event-channel irq. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) fail:
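	/* Unwind whatever part of the pending request pool was allocated. */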
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) list_del(&req->free_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (!req->segments[j])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) kfree(req->segments[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (!req->indirect_pages[j])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) kfree(req->indirect_pages[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) kfree(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) static int connect_ring(struct backend_info *be)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) struct xenbus_device *dev = be->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) struct xen_blkif *blkif = be->blkif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) char protocol[64] = "";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) int err, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) char *xspath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) size_t xspathsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) unsigned int requested_num_queues = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) unsigned int ring_page_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) pr_debug("%s %s\n", __func__, dev->otherend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) err = xenbus_scanf(XBT_NIL, dev->otherend, "protocol",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) "%63s", protocol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (err <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) strcpy(protocol, "unspecified, assuming default");
	else if (strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE) == 0)
		blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
	else if (strcmp(protocol, XEN_IO_PROTO_ABI_X86_32) == 0)
		blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
	else if (strcmp(protocol, XEN_IO_PROTO_ABI_X86_64) == 0)
		blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
	else {
		xenbus_dev_fatal(dev, -ENOSYS, "unknown fe protocol %s", protocol);
		return -ENOSYS;
	}
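	/* Enable persistent grants only if the frontend advertises them too. */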
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) if (blkif->vbd.feature_gnt_persistent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) blkif->vbd.feature_gnt_persistent =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) xenbus_read_unsigned(dev->otherend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) "feature-persistent", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) blkif->vbd.overflow_max_grants = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
	/*
	 * Read the number of hardware queues requested by the frontend.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) requested_num_queues = xenbus_read_unsigned(dev->otherend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) "multi-queue-num-queues",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 1);
	if (requested_num_queues > xenblk_max_queues
	    || requested_num_queues == 0) {
		/* Buggy or malicious guest. */
		xenbus_dev_fatal(dev, -ENOSYS,
				 "guest requested %u queues, must be between 1 and %u",
				 requested_num_queues, xenblk_max_queues);
		return -ENOSYS;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) blkif->nr_rings = requested_num_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) if (xen_blkif_alloc_rings(blkif))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
	pr_info("%s: using %u queues, protocol %d (%s) %s\n", dev->nodename,
		blkif->nr_rings, blkif->blk_protocol, protocol,
		blkif->vbd.feature_gnt_persistent ? "persistent grants" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
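	/*
	 * No "ring-page-order" node means a legacy single-page ring named
	 * "ring-ref"; otherwise the ring spans 1 << order pages named
	 * "ring-ref%u".
	 */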
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) &ring_page_order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) if (err != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) blkif->nr_ring_pages = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) blkif->multi_ref = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) } else if (ring_page_order <= xen_blkif_max_ring_order) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) blkif->nr_ring_pages = 1 << ring_page_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) blkif->multi_ref = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) } else {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err,
				 "requested ring page order %u exceeds max of %u",
				 ring_page_order,
				 xen_blkif_max_ring_order);
		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
	if (blkif->nr_rings == 1)
		return read_per_ring_refs(&blkif->rings[0], dev->otherend);

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) xspath = kmalloc(xspathsize, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) if (!xspath) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) xenbus_dev_fatal(dev, -ENOMEM, "reading ring references");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
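	/* Read the per-queue ring details from "<otherend>/queue-%u". */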
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) for (i = 0; i < blkif->nr_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) memset(xspath, 0, xspathsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) err = read_per_ring_refs(&blkif->rings[i], xspath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) kfree(xspath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) kfree(xspath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) static const struct xenbus_device_id xen_blkbk_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) { "vbd" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) { "" }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) static struct xenbus_driver xen_blkbk_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) .ids = xen_blkbk_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) .probe = xen_blkbk_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) .remove = xen_blkbk_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) .otherend_changed = frontend_changed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) .allow_rebind = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) .reclaim_memory = reclaim_memory,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) int xen_blkif_xenbus_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) return xenbus_register_backend(&xen_blkbk_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) void xen_blkif_xenbus_fini(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) xenbus_unregister_driver(&xen_blkbk_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) }