// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Network block device - make block devices work over TCP
 *
 * Note that you cannot swap over this thing, yet. It seems to work, but
 * it deadlocks sometimes - you cannot swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
#include <linux/nbd-netlink.h>
#include <net/genetlink.h>

#define CREATE_TRACE_POINTS
#include <trace/events/nbd.h>

static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static int nbd_total_devices;

struct nbd_sock {
	struct socket *sock;
	struct mutex tx_lock;
	struct request *pending;
	int sent;
	bool dead;
	int fallback_index;
	int cookie;
};

struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};

struct link_dead_args {
	struct work_struct work;
	int index;
};

#define NBD_RT_TIMEDOUT			0
#define NBD_RT_DISCONNECT_REQUESTED	1
#define NBD_RT_DISCONNECTED		2
#define NBD_RT_HAS_PID_FILE		3
#define NBD_RT_HAS_CONFIG_REF		4
#define NBD_RT_BOUND			5
#define NBD_RT_DISCONNECT_ON_CLOSE	6

#define NBD_DESTROY_ON_DISCONNECT	0
#define NBD_DISCONNECT_REQUESTED	1
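
/*
 * Descriptive note, added for clarity: the NBD_RT_* values above are bit
 * numbers into nbd_config->runtime_flags, and the NBD_* values are bit
 * numbers into nbd_device->flags. Both words are only ever touched through
 * the atomic bitops, e.g.:
 *
 *	set_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
 *	if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
 *		...
 */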

struct nbd_config {
	u32 flags;
	unsigned long runtime_flags;
	u64 dead_conn_timeout;

	struct nbd_sock **socks;
	int num_connections;
	atomic_t live_connections;
	wait_queue_head_t conn_wait;

	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
	loff_t blksize;
	loff_t bytesize;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

struct nbd_device {
	struct blk_mq_tag_set tag_set;

	int index;
	refcount_t config_refs;
	refcount_t refs;
	struct nbd_config *config;
	struct mutex config_lock;
	struct gendisk *disk;
	struct workqueue_struct *recv_workq;

	struct list_head list;
	struct task_struct *task_recv;
	struct task_struct *task_setup;

	struct completion *destroy_complete;
	unsigned long flags;
};

#define NBD_CMD_REQUEUED	1

struct nbd_cmd {
	struct nbd_device *nbd;
	struct mutex lock;
	int index;
	int cookie;
	int retries;
	blk_status_t status;
	unsigned long flags;
	u32 cmd_cookie;
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

#define NBD_DEF_BLKSIZE 1024

static unsigned int nbds_max = 16;
static int max_part = 16;
static int part_shift;

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
static void nbd_config_put(struct nbd_device *nbd);
static void nbd_connect_reply(struct genl_info *info, int index);
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
static void nbd_dead_link_work(struct work_struct *work);
static void nbd_disconnect_and_put(struct nbd_device *nbd);

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static void nbd_requeue_cmd(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);

	if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
		blk_mq_requeue_request(req, true);
}

#define NBD_COOKIE_BITS 32

static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	u32 tag = blk_mq_unique_tag(req);
	u64 cookie = cmd->cmd_cookie;

	return (cookie << NBD_COOKIE_BITS) | tag;
}

static u32 nbd_handle_to_tag(u64 handle)
{
	return (u32)handle;
}

static u32 nbd_handle_to_cookie(u64 handle)
{
	return (u32)(handle >> NBD_COOKIE_BITS);
}
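
/*
 * Worked example (illustrative only): with cmd->cmd_cookie == 2 and
 * blk_mq_unique_tag() == 0x00010003, nbd_cmd_handle() packs the handle
 * 0x0000000200010003; nbd_handle_to_tag() then recovers 0x00010003 and
 * nbd_handle_to_cookie() recovers 2. The cookie in the upper 32 bits is
 * what lets nbd_read_stat() reject a reply that matches the tag of a
 * request which has since been requeued and re-sent.
 */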

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}

static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static const struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = 0444},
	.show = pid_show,
};
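
/*
 * The attribute above is created on the disk's device, so it should appear
 * as /sys/block/nbd<i>/pid (path assuming the standard gendisk sysfs
 * layout). Userspace can read it to find the process holding the receive
 * side of the connection, e.g.:
 *
 *	$ cat /sys/block/nbd0/pid
 *	1234
 */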

static void nbd_dev_remove(struct nbd_device *nbd)
{
	struct gendisk *disk = nbd->disk;
	struct request_queue *q;

	if (disk) {
		q = disk->queue;
		del_gendisk(disk);
		blk_cleanup_queue(q);
		blk_mq_free_tag_set(&nbd->tag_set);
		disk->private_data = NULL;
		put_disk(disk);
	}

	/*
	 * Do this at the very end, just before nbd is freed, to make
	 * sure that the disk and its kobject are completely gone, so
	 * that the same device cannot be created again while the old
	 * one is still being torn down.
	 */
	if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) && nbd->destroy_complete)
		complete(nbd->destroy_complete);

	kfree(nbd);
}

static void nbd_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->refs,
					&nbd_index_mutex)) {
		idr_remove(&nbd_index_idr, nbd->index);
		nbd_dev_remove(nbd);
		mutex_unlock(&nbd_index_mutex);
	}
}

static int nbd_disconnected(struct nbd_config *config)
{
	return test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags) ||
		test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
}

static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
				int notify)
{
	if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
		struct link_dead_args *args;
		args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
		if (args) {
			INIT_WORK(&args->work, nbd_dead_link_work);
			args->index = nbd->index;
			queue_work(system_wq, &args->work);
		}
	}
	if (!nsock->dead) {
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		if (atomic_dec_return(&nbd->config->live_connections) == 0) {
			if (test_and_clear_bit(NBD_RT_DISCONNECT_REQUESTED,
					       &nbd->config->runtime_flags)) {
				set_bit(NBD_RT_DISCONNECTED,
					&nbd->config->runtime_flags);
				dev_info(nbd_to_dev(nbd),
					"Disconnected due to user request.\n");
			}
		}
	}
	nsock->dead = true;
	nsock->pending = NULL;
	nsock->sent = 0;
}
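
/*
 * Descriptive note, added for clarity: nbd_mark_nsock_dead() is always
 * called with nsock->tx_lock held, as in sock_shutdown(), nbd_xmit_timeout()
 * and recv_work() below, so the dead/pending/sent updates cannot race with
 * a concurrent nbd_send_cmd() on the same socket.
 */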

static void nbd_size_clear(struct nbd_device *nbd)
{
	if (nbd->config->bytesize) {
		set_capacity(nbd->disk, 0);
		kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
	}
}

static void nbd_size_update(struct nbd_device *nbd, bool start)
{
	struct nbd_config *config = nbd->config;
	struct block_device *bdev = bdget_disk(nbd->disk, 0);
	sector_t nr_sectors = config->bytesize >> 9;

	if (config->flags & NBD_FLAG_SEND_TRIM) {
		nbd->disk->queue->limits.discard_granularity = config->blksize;
		nbd->disk->queue->limits.discard_alignment = config->blksize;
		blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
	}
	blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
	blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
	set_capacity(nbd->disk, nr_sectors);
	if (bdev) {
		if (bdev->bd_disk) {
			bd_set_nr_sectors(bdev, nr_sectors);
			if (start)
				set_blocksize(bdev, config->blksize);
		} else
			set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
		bdput(bdev);
	}
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}

static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
			 loff_t nr_blocks)
{
	struct nbd_config *config = nbd->config;
	config->blksize = blocksize;
	config->bytesize = blocksize * nr_blocks;
	if (nbd->task_recv != NULL)
		nbd_size_update(nbd, false);
}

static void nbd_complete_rq(struct request *req)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req,
		cmd->status ? "failed" : "done");

	blk_mq_end_request(req, cmd->status);
}

/*
 * Forcibly shut down the socket, causing all in-flight listeners to error out.
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int i;

	if (config->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
		return;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];
		mutex_lock(&nsock->tx_lock);
		nbd_mark_nsock_dead(nbd, nsock, 0);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}

static u32 req_to_nbd_cmd_type(struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		return NBD_CMD_TRIM;
	case REQ_OP_FLUSH:
		return NBD_CMD_FLUSH;
	case REQ_OP_WRITE:
		return NBD_CMD_WRITE;
	case REQ_OP_READ:
		return NBD_CMD_READ;
	default:
		return U32_MAX;
	}
}

static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
						 bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;

	if (!mutex_trylock(&cmd->lock))
		return BLK_EH_RESET_TIMER;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		cmd->status = BLK_STS_TIMEOUT;
		mutex_unlock(&cmd->lock);
		goto done;
	}
	config = nbd->config;

	if (config->num_connections > 1 ||
	    (config->num_connections == 1 && nbd->tag_set.timeout)) {
		dev_err_ratelimited(nbd_to_dev(nbd),
				    "Connection timed out, retrying (%d/%d alive)\n",
				    atomic_read(&config->live_connections),
				    config->num_connections);
		/*
		 * Hooray we have more connections, requeue this IO, the submit
		 * path will put it on a real connection. Or if only one
		 * connection is configured, the submit path will wait until
		 * a new connection is reconfigured or until the dead-connection
		 * timeout expires.
		 */
		if (config->socks) {
			if (cmd->index < config->num_connections) {
				struct nbd_sock *nsock =
					config->socks[cmd->index];
				mutex_lock(&nsock->tx_lock);
				/* We can have multiple outstanding requests, so
				 * we don't want to mark the nsock dead if we've
				 * already reconnected with a new socket, so
				 * only mark it dead if it's the same socket
				 * this request was sent out on.
				 */
				if (cmd->cookie == nsock->cookie)
					nbd_mark_nsock_dead(nbd, nsock, 1);
				mutex_unlock(&nsock->tx_lock);
			}
			mutex_unlock(&cmd->lock);
			nbd_requeue_cmd(cmd);
			nbd_config_put(nbd);
			return BLK_EH_DONE;
		}
	}

	if (!nbd->tag_set.timeout) {
		/*
		 * Userspace sets timeout=0 to disable socket disconnection,
		 * so just warn and reset the timer.
		 */
		struct nbd_sock *nsock = config->socks[cmd->index];
		cmd->retries++;
		dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n",
			req, nbdcmd_to_ascii(req_to_nbd_cmd_type(req)),
			(unsigned long long)blk_rq_pos(req) << 9,
			blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries);

		mutex_lock(&nsock->tx_lock);
		if (cmd->cookie != nsock->cookie) {
			nbd_requeue_cmd(cmd);
			mutex_unlock(&nsock->tx_lock);
			mutex_unlock(&cmd->lock);
			nbd_config_put(nbd);
			return BLK_EH_DONE;
		}
		mutex_unlock(&nsock->tx_lock);
		mutex_unlock(&cmd->lock);
		nbd_config_put(nbd);
		return BLK_EH_RESET_TIMER;
	}

	dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n");
	set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags);
	cmd->status = BLK_STS_IOERR;
	mutex_unlock(&cmd->lock);
	sock_shutdown(nbd);
	nbd_config_put(nbd);
done:
	blk_mq_complete_request(req);
	return BLK_EH_DONE;
}
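
/*
 * Summary (descriptive, added for clarity) of the timeout policy above:
 * with multiple connections, or one connection plus a user-set timeout,
 * the request is requeued so the submit path can put it on a live socket;
 * with timeout=0 the request is either requeued (if its socket has been
 * replaced) or the timer is simply reset; otherwise the device is marked
 * NBD_RT_TIMEDOUT, all sockets are shut down and the request is completed
 * with BLK_STS_IOERR.
 */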

/*
 * Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
		     struct iov_iter *iter, int msg_flags, int *sent)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock = config->socks[index]->sock;
	int result;
	struct msghdr msg;
	unsigned int noreclaim_flag;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	msg.msg_iter = *iter;

	noreclaim_flag = memalloc_noreclaim_save();
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = sock_sendmsg(sock, &msg);
		else
			result = sock_recvmsg(sock, &msg, msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		if (sent)
			*sent += result;
	} while (msg_data_left(&msg));

	memalloc_noreclaim_restore(noreclaim_flag);

	return result;
}

/*
 * Different settings for sk->sk_sndtimeo can result in different return values
 * if there is a signal pending when we enter sendmsg, because reasons?
 */
static inline int was_interrupted(int result)
{
	return result == -ERESTARTSYS || result == -EINTR;
}

/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_config *config = nbd->config;
	struct nbd_sock *nsock = config->socks[index];
	int result;
	struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u64 handle;
	u32 type;
	u32 nbd_cmd_flags = 0;
	int sent = nsock->sent, skip = 0;

	iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));

	type = req_to_nbd_cmd_type(req);
	if (type == U32_MAX)
		return -EIO;

	if (rq_data_dir(req) == WRITE &&
	    (config->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		return -EIO;
	}

	if (req->cmd_flags & REQ_FUA)
		nbd_cmd_flags |= NBD_CMD_FLAG_FUA;

	/* We did a partial send previously, and we at least sent the whole
	 * request struct, so just go and send the rest of the pages in the
	 * request.
	 */
	if (sent) {
		if (sent >= sizeof(request)) {
			skip = sent - sizeof(request);

			/* initialize handle for tracing purposes */
			handle = nbd_cmd_handle(cmd);

			goto send_pages;
		}
		iov_iter_advance(&from, sent);
	} else {
		cmd->cmd_cookie++;
	}
	cmd->index = index;
	cmd->cookie = nsock->cookie;
	cmd->retries = 0;
	request.type = htonl(type | nbd_cmd_flags);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	handle = nbd_cmd_handle(cmd);
	memcpy(request.handle, &handle, sizeof(handle));

	trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		req, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &from,
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
	trace_nbd_header_sent(req, handle);
	if (result <= 0) {
		if (was_interrupted(result)) {
			/* If we haven't sent anything we can just return BUSY,
			 * however if we have sent something we need to make
			 * sure we only allow this req to be sent until we are
			 * completely done.
			 */
			if (sent) {
				nsock->pending = req;
				nsock->sent = sent;
			}
			set_bit(NBD_CMD_REQUEUED, &cmd->flags);
			return BLK_STS_RESOURCE;
		}
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EAGAIN;
	}
send_pages:
	if (type != NBD_CMD_WRITE)
		goto out;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				req, bvec.bv_len);
			iov_iter_bvec(&from, WRITE, &bvec, 1, bvec.bv_len);
			if (skip) {
				if (skip >= iov_iter_count(&from)) {
					skip -= iov_iter_count(&from);
					continue;
				}
				iov_iter_advance(&from, skip);
				skip = 0;
			}
			result = sock_xmit(nbd, index, 1, &from, flags, &sent);
			if (result <= 0) {
				if (was_interrupted(result)) {
					/* We've already sent the header, we
					 * have no choice but to set pending and
					 * return BUSY.
					 */
					nsock->pending = req;
					nsock->sent = sent;
					set_bit(NBD_CMD_REQUEUED, &cmd->flags);
					return BLK_STS_RESOURCE;
				}
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EAGAIN;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
out:
	trace_nbd_payload_sent(req, handle);
	nsock->pending = NULL;
	nsock->sent = 0;
	return 0;
}
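
/*
 * Wire-format sketch (illustrative; see struct nbd_request in <linux/nbd.h>
 * for the authoritative layout) of what nbd_send_cmd() puts on the socket
 * for a write:
 *
 *	magic  = htonl(NBD_REQUEST_MAGIC)
 *	type   = htonl(NBD_CMD_WRITE | flags)	(e.g. NBD_CMD_FLAG_FUA)
 *	handle = 64-bit cookie/tag from nbd_cmd_handle()
 *	from   = cpu_to_be64(byte offset on the device)
 *	len    = htonl(byte count)
 *
 * followed by the raw page data from the request's bios. For a flush,
 * from and len stay zero and no data follows.
 */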

/* ERR_PTR returned = something went wrong, inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int result;
	struct nbd_reply reply;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u64 handle;
	u16 hwq;
	u32 tag;
	struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
	struct iov_iter to;
	int ret = 0;

	reply.magic = 0;
	iov_iter_kvec(&to, READ, &iov, 1, sizeof(reply));
	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
	if (result <= 0) {
		if (!nbd_disconnected(config))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	memcpy(&handle, reply.handle, sizeof(handle));
	tag = nbd_handle_to_tag(handle);
	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	trace_nbd_header_received(req, handle);
	cmd = blk_mq_rq_to_pdu(req);

	mutex_lock(&cmd->lock);
	if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
		dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
			req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
		ret = -ENOENT;
		goto out;
	}
	if (cmd->status != BLK_STS_OK) {
		dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
			req);
		ret = -ENOENT;
		goto out;
	}
	if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
		dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
			req);
		ret = -ENOENT;
		goto out;
	}
	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		cmd->status = BLK_STS_IOERR;
		goto out;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			iov_iter_bvec(&to, READ, &bvec, 1, bvec.bv_len);
			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				/*
				 * If we've disconnected, we need to make sure we
				 * complete this request, otherwise error out
				 * and let the timeout stuff handle resubmitting
				 * this request onto another connection.
				 */
				if (nbd_disconnected(config)) {
					cmd->status = BLK_STS_IOERR;
					goto out;
				}
				ret = -EIO;
				goto out;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				req, bvec.bv_len);
		}
	}
out:
	trace_nbd_payload_received(req, handle);
	mutex_unlock(&cmd->lock);
	return ret ? ERR_PTR(ret) : cmd;
}

static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_config *config = nbd->config;
	struct nbd_cmd *cmd;
	struct request *rq;

	while (1) {
		cmd = nbd_read_stat(nbd, args->index);
		if (IS_ERR(cmd)) {
			struct nbd_sock *nsock = config->socks[args->index];

			mutex_lock(&nsock->tx_lock);
			nbd_mark_nsock_dead(nbd, nsock, 1);
			mutex_unlock(&nsock->tx_lock);
			break;
		}

		rq = blk_mq_rq_from_pdu(cmd);
		if (likely(!blk_should_fake_timeout(rq->q)))
			blk_mq_complete_request(rq);
	}
	nbd_config_put(nbd);
	atomic_dec(&config->recv_threads);
	wake_up(&config->recv_wq);
	kfree(args);
}

static bool nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) /* don't abort one completed request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (blk_mq_request_completed(req))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) mutex_lock(&cmd->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) cmd->status = BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) mutex_unlock(&cmd->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) blk_mq_complete_request(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) static void nbd_clear_que(struct nbd_device *nbd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) blk_mq_quiesce_queue(nbd->disk->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) blk_mq_unquiesce_queue(nbd->disk->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
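/*
 * Pick a live connection to retry a command on after its socket has died.
 * Returns the index of a usable socket, or -1 if none is available (the
 * device is disconnected, there is only one connection, or every other
 * socket is dead too).
 */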
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) static int find_fallback(struct nbd_device *nbd, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) struct nbd_config *config = nbd->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) int new_index = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) struct nbd_sock *nsock = config->socks[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) int fallback = nsock->fallback_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) return new_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (config->num_connections <= 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) dev_err_ratelimited(disk_to_dev(nbd->disk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) "Dead connection, failed to find a fallback\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) return new_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (fallback >= 0 && fallback < config->num_connections &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) !config->socks[fallback]->dead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) return fallback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) if (nsock->fallback_index < 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) nsock->fallback_index >= config->num_connections ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) config->socks[nsock->fallback_index]->dead) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) for (i = 0; i < config->num_connections; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (i == index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) if (!config->socks[i]->dead) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) new_index = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) nsock->fallback_index = new_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) if (new_index < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) dev_err_ratelimited(disk_to_dev(nbd->disk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) "Dead connection, failed to find a fallback\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) return new_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) new_index = nsock->fallback_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) return new_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
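/*
 * Wait up to dead_conn_timeout for a connection to come back. Returns
 * nonzero if a live connection showed up in time, 0 if we timed out, the
 * device was disconnected, or no dead connection timeout is configured,
 * in which case the caller fails the command.
 */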
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) static int wait_for_reconnect(struct nbd_device *nbd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) struct nbd_config *config = nbd->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (!config->dead_conn_timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) return wait_event_timeout(config->conn_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) atomic_read(&config->live_connections) > 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) config->dead_conn_timeout) > 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
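/*
 * Transmit one command on the socket matching its hw queue. A config
 * reference is held for the duration of the send; if the chosen socket is
 * dead we fall back to another live connection (or wait for a reconnect),
 * and a partial send or an -EAGAIN from the link causes a requeue rather
 * than an error.
 */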
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) struct request *req = blk_mq_rq_from_pdu(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) struct nbd_device *nbd = cmd->nbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) struct nbd_config *config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) struct nbd_sock *nsock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (!refcount_inc_not_zero(&nbd->config_refs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) dev_err_ratelimited(disk_to_dev(nbd->disk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) "Socks array is empty\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) blk_mq_start_request(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) config = nbd->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) if (index >= config->num_connections) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) dev_err_ratelimited(disk_to_dev(nbd->disk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) "Attempted send on invalid socket\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) nbd_config_put(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) blk_mq_start_request(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) cmd->status = BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) nsock = config->socks[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) mutex_lock(&nsock->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) if (nsock->dead) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) int old_index = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) index = find_fallback(nbd, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) mutex_unlock(&nsock->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) if (index < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (wait_for_reconnect(nbd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) index = old_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) }
			/* All the sockets should already be down at this
			 * point; we just want to make sure that DISCONNECTED
			 * is set so any requests that were queued waiting
			 * for the reconnect timer don't trigger the timer
			 * again and instead just error out.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) sock_shutdown(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) nbd_config_put(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) blk_mq_start_request(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
	/* Handle the case where we have a pending request that was partially
	 * transmitted and _has_ to be serviced first. We need to call requeue
	 * here so that it gets put _after_ the request that is already on the
	 * dispatch list.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) blk_mq_start_request(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (unlikely(nsock->pending && nsock->pending != req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) nbd_requeue_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) * Some failures are related to the link going down, so anything that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) * returns EAGAIN can be retried on a different socket.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) ret = nbd_send_cmd(nbd, cmd, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (ret == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) dev_err_ratelimited(disk_to_dev(nbd->disk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) "Request send failed, requeueing\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) nbd_mark_nsock_dead(nbd, nsock, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) nbd_requeue_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) mutex_unlock(&nsock->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) nbd_config_put(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) const struct blk_mq_queue_data *bd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
	/*
	 * Since we look at the bios to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send. This keeps us from dereferencing
	 * freed data if we have particularly fast completions (i.e. we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) mutex_lock(&cmd->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) clear_bit(NBD_CMD_REQUEUED, &cmd->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
	/* We can be called directly from the userspace process, which means we
	 * could possibly have signals pending that would make our sendmsg
	 * fail. In this case we need to return that we are busy; otherwise,
	 * error out as appropriate.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) ret = nbd_handle_cmd(cmd, hctx->queue_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) ret = BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) else if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) ret = BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) mutex_unlock(&cmd->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
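/*
 * Resolve a socket fd passed in from userspace. Sockets that cannot be
 * shut down are rejected, since the driver relies on shutting a socket
 * down to kick its recv worker on disconnect and timeout.
 */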
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) int *err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) struct socket *sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) *err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) sock = sockfd_lookup(fd, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (!sock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (sock->ops->shutdown == sock_no_shutdown) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) *err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) sockfd_put(sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) return sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
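/*
 * Append one connection to the ->socks array (NBD_SET_SOCK, or one socket
 * item from a netlink configure). The queue is frozen around the
 * krealloc() so no request can look at the array while it is reallocated,
 * and ioctl-based setup is fenced to a single task.
 */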
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) bool netlink)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) struct nbd_config *config = nbd->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) struct socket *sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) struct nbd_sock **socks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) struct nbd_sock *nsock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) sock = nbd_get_socket(nbd, arg, &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (!sock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * We need to make sure we don't get any errant requests while we're
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) * reallocating the ->socks array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) blk_mq_freeze_queue(nbd->disk->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) if (!netlink && !nbd->task_setup &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) !test_bit(NBD_RT_BOUND, &config->runtime_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) nbd->task_setup = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) if (!netlink &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) (nbd->task_setup != current ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) dev_err(disk_to_dev(nbd->disk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) "Device being setup by another task");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) goto put_socket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) nsock = kzalloc(sizeof(*nsock), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) if (!nsock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) goto put_socket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) socks = krealloc(config->socks, (config->num_connections + 1) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) sizeof(struct nbd_sock *), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (!socks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) kfree(nsock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) goto put_socket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) config->socks = socks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) nsock->fallback_index = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) nsock->dead = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) mutex_init(&nsock->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) nsock->sock = sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) nsock->pending = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) nsock->sent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) nsock->cookie = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) socks[config->num_connections++] = nsock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) atomic_inc(&config->live_connections);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) blk_mq_unfreeze_queue(nbd->disk->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) put_socket:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) blk_mq_unfreeze_queue(nbd->disk->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) sockfd_put(sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
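/*
 * Netlink reconnect: swap a fresh socket into the first dead slot, bump
 * the slot's cookie so leftovers from the old socket can be told apart,
 * and start a new recv worker for it. Returns -ENOSPC if there was no
 * dead connection to replace.
 */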
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) struct nbd_config *config = nbd->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) struct socket *sock, *old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) struct recv_thread_args *args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) sock = nbd_get_socket(nbd, arg, &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if (!sock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) args = kzalloc(sizeof(*args), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) if (!args) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) sockfd_put(sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) for (i = 0; i < config->num_connections; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) struct nbd_sock *nsock = config->socks[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) if (!nsock->dead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) mutex_lock(&nsock->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) if (!nsock->dead) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) mutex_unlock(&nsock->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) sk_set_memalloc(sock->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) if (nbd->tag_set.timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) atomic_inc(&config->recv_threads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) refcount_inc(&nbd->config_refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) old = nsock->sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) nsock->fallback_index = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) nsock->sock = sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) nsock->dead = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) INIT_WORK(&args->work, recv_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) args->index = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) args->nbd = nbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) nsock->cookie++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) mutex_unlock(&nsock->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) sockfd_put(old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) clear_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
		/* We take the tx_lock in an error path in recv_work, so we
		 * need to queue_work outside of the tx_lock.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) queue_work(nbd->recv_workq, &args->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) atomic_inc(&config->live_connections);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) wake_up(&config->conn_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) sockfd_put(sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) kfree(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
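/* Reset the block device size to zero unless someone else still holds it open. */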
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) static void nbd_bdev_reset(struct block_device *bdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) if (bdev->bd_openers > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) bd_set_nr_sectors(bdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
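/*
 * Apply the transmission flags advertised by the server to the gendisk:
 * read-only, discard support, and writeback cache / FUA.
 */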
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) static void nbd_parse_flags(struct nbd_device *nbd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) struct nbd_config *config = nbd->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if (config->flags & NBD_FLAG_READ_ONLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) set_disk_ro(nbd->disk, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) set_disk_ro(nbd->disk, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) if (config->flags & NBD_FLAG_SEND_TRIM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) if (config->flags & NBD_FLAG_SEND_FLUSH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) if (config->flags & NBD_FLAG_SEND_FUA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) blk_queue_write_cache(nbd->disk->queue, true, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) blk_queue_write_cache(nbd->disk->queue, true, false);
	} else {
		blk_queue_write_cache(nbd->disk->queue, false, false);
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
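/*
 * Send NBD_CMD_DISC on every connection. A failure to transmit is only
 * logged; the server may already be gone.
 */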
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) static void send_disconnects(struct nbd_device *nbd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) struct nbd_config *config = nbd->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) struct nbd_request request = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) .magic = htonl(NBD_REQUEST_MAGIC),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) .type = htonl(NBD_CMD_DISC),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) struct iov_iter from;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) for (i = 0; i < config->num_connections; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) struct nbd_sock *nsock = config->socks[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) mutex_lock(&nsock->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) dev_err(disk_to_dev(nbd->disk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) "Send disconnect failed %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) mutex_unlock(&nsock->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) static int nbd_disconnect(struct nbd_device *nbd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) struct nbd_config *config = nbd->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) set_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) send_disconnects(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) static void nbd_clear_sock(struct nbd_device *nbd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) sock_shutdown(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) nbd_clear_que(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) nbd->task_setup = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
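/*
 * Drop a reference on the device configuration. The final put tears
 * everything down under config_lock: debugfs entries, the pid attribute,
 * all sockets, the recv workqueue, and the queue limits derived from the
 * server's flags.
 */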
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) static void nbd_config_put(struct nbd_device *nbd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) if (refcount_dec_and_mutex_lock(&nbd->config_refs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) &nbd->config_lock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) struct nbd_config *config = nbd->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) nbd_dev_dbg_close(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) nbd_size_clear(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) if (test_and_clear_bit(NBD_RT_HAS_PID_FILE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) &config->runtime_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) nbd->task_recv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) nbd_clear_sock(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) if (config->num_connections) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) for (i = 0; i < config->num_connections; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) sockfd_put(config->socks[i]->sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) kfree(config->socks[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) kfree(config->socks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) kfree(nbd->config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) nbd->config = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) if (nbd->recv_workq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) destroy_workqueue(nbd->recv_workq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) nbd->recv_workq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) nbd->tag_set.timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) nbd->disk->queue->limits.discard_granularity = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) nbd->disk->queue->limits.discard_alignment = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) mutex_unlock(&nbd->config_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) nbd_put(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) module_put(THIS_MODULE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
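/*
 * Common start path for NBD_DO_IT and netlink: allocate the recv
 * workqueue, size the hw queues to the connection count, publish the pid
 * attribute, and kick one recv worker per connection. Each worker owns a
 * config reference until it exits.
 */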
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) static int nbd_start_device(struct nbd_device *nbd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) struct nbd_config *config = nbd->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) int num_connections = config->num_connections;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) int error = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) if (nbd->task_recv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) if (!config->socks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) if (num_connections > 1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) nbd->recv_workq = alloc_workqueue("knbd%d-recv",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) WQ_MEM_RECLAIM | WQ_HIGHPRI |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) WQ_UNBOUND, 0, nbd->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (!nbd->recv_workq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) nbd->task_recv = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) nbd_parse_flags(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) set_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) nbd_dev_dbg_init(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) for (i = 0; i < num_connections; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) struct recv_thread_args *args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) args = kzalloc(sizeof(*args), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) if (!args) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) sock_shutdown(nbd);
			/*
			 * If num_connections is m (m > 2) and the kzallocs
			 * for connections 1..n (1 < n < m) succeeded but the
			 * one for connection n + 1 failed, we still have n
			 * recv threads running. Flush the workqueue here to
			 * prevent those threads from dropping the last
			 * config_refs and trying to destroy the workqueue
			 * from inside the workqueue.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) flush_workqueue(nbd->recv_workq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) sk_set_memalloc(config->socks[i]->sock->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) if (nbd->tag_set.timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) config->socks[i]->sock->sk->sk_sndtimeo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) nbd->tag_set.timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) atomic_inc(&config->recv_threads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) refcount_inc(&nbd->config_refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) INIT_WORK(&args->work, recv_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) args->nbd = nbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) args->index = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) queue_work(nbd->recv_workq, &args->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) nbd_size_update(nbd, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
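/*
 * NBD_DO_IT: start the device and then sleep in the ioctl until every
 * recv worker has exited. This is how the legacy interface keeps the
 * configuring process around for the lifetime of the connection.
 */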
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) struct nbd_config *config = nbd->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) ret = nbd_start_device(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) if (max_part)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) mutex_unlock(&nbd->config_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) ret = wait_event_interruptible(config->recv_wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) atomic_read(&config->recv_threads) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) sock_shutdown(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) flush_workqueue(nbd->recv_workq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) mutex_lock(&nbd->config_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) nbd_bdev_reset(bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) /* user requested, ignore socket errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) if (test_bit(NBD_RT_TIMEDOUT, &config->runtime_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) ret = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) struct block_device *bdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) sock_shutdown(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) __invalidate_device(bdev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) nbd_bdev_reset(bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) &nbd->config->runtime_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) nbd_config_put(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
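/* Block sizes must be a power of two between 512 and PAGE_SIZE. */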
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) static bool nbd_is_valid_blksize(unsigned long blksize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) if (!blksize || !is_power_of_2(blksize) || blksize < 512 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) blksize > PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
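/*
 * A timeout of 0 means "no user-set timeout": tag_set.timeout is cleared,
 * but the request queue keeps a 30 second default so hung commands are
 * still noticed by the block layer's timeout handling.
 */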
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) nbd->tag_set.timeout = timeout * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) if (timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) /* Must be called with config_lock held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) unsigned int cmd, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) struct nbd_config *config = nbd->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) case NBD_DISCONNECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) return nbd_disconnect(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) case NBD_CLEAR_SOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) nbd_clear_sock_ioctl(nbd, bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) case NBD_SET_SOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) return nbd_add_socket(nbd, arg, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) case NBD_SET_BLKSIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) if (!arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) arg = NBD_DEF_BLKSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) if (!nbd_is_valid_blksize(arg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) nbd_size_set(nbd, arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) div_s64(config->bytesize, arg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) case NBD_SET_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) nbd_size_set(nbd, config->blksize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) div_s64(arg, config->blksize));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) case NBD_SET_SIZE_BLOCKS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) nbd_size_set(nbd, config->blksize, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) case NBD_SET_TIMEOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) nbd_set_cmd_timeout(nbd, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) case NBD_SET_FLAGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) config->flags = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) case NBD_DO_IT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) return nbd_start_device_ioctl(nbd, bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) case NBD_CLEAR_QUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) * This is for compatibility only. The queue is always cleared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) * by NBD_DO_IT or NBD_CLEAR_SOCK.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) case NBD_PRINT_DEBUG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) * For compatibility only, we no longer keep a list of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) * outstanding requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) return -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) }
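
/*
 * For reference, the classic ioctl-based setup done by a userspace client
 * looks roughly like the sketch below (illustrative only; the
 * connect_to_server() helper is a stand-in for whatever establishes the
 * connection to the server, it is not part of this driver):
 *
 *	int sock = connect_to_server(host, port);
 *	int dev = open("/dev/nbd0", O_RDWR);
 *
 *	ioctl(dev, NBD_SET_BLKSIZE, 4096);
 *	ioctl(dev, NBD_SET_SIZE_BLOCKS, size / 4096);
 *	ioctl(dev, NBD_SET_SOCK, sock);
 *	ioctl(dev, NBD_DO_IT);		// blocks until disconnect
 */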
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) unsigned int cmd, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) struct nbd_device *nbd = bdev->bd_disk->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) struct nbd_config *config = nbd->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) int error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
	/* The block layer will pass back some non-nbd ioctls in case we have
	 * special handling for them, but we don't, so just return an error.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) if (_IOC_TYPE(cmd) != 0xab)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) mutex_lock(&nbd->config_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
	/* Don't allow ioctl operations on an nbd device that was created with
	 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) error = __nbd_ioctl(bdev, nbd, cmd, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) mutex_unlock(&nbd->config_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
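/*
 * Allocate a fresh device configuration. try_module_get() pins the module
 * for the lifetime of the config; the matching module_put() is in
 * nbd_config_put().
 */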
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) static struct nbd_config *nbd_alloc_config(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) struct nbd_config *config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) if (!config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) atomic_set(&config->recv_threads, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) init_waitqueue_head(&config->recv_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) init_waitqueue_head(&config->conn_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) config->blksize = NBD_DEF_BLKSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) atomic_set(&config->live_connections, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) try_module_get(THIS_MODULE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) return config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
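/*
 * Opening the device takes a device ref and a config ref, allocating an
 * empty config on first open. This runs under nbd_index_mutex so the
 * device cannot go away while the refcounts are being raised.
 */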
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) static int nbd_open(struct block_device *bdev, fmode_t mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) struct nbd_device *nbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) mutex_lock(&nbd_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) nbd = bdev->bd_disk->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) if (!nbd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) if (!refcount_inc_not_zero(&nbd->refs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) if (!refcount_inc_not_zero(&nbd->config_refs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) struct nbd_config *config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) mutex_lock(&nbd->config_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) if (refcount_inc_not_zero(&nbd->config_refs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) mutex_unlock(&nbd->config_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) config = nbd->config = nbd_alloc_config();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) if (!config) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) mutex_unlock(&nbd->config_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) refcount_set(&nbd->config_refs, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) refcount_inc(&nbd->refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) mutex_unlock(&nbd->config_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) } else if (nbd_disconnected(nbd->config)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) mutex_unlock(&nbd_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
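/*
 * Last close optionally triggers a disconnect (if the device was
 * configured with DISCONNECT_ON_CLOSE) and drops the references taken in
 * nbd_open().
 */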
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) static void nbd_release(struct gendisk *disk, fmode_t mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) struct nbd_device *nbd = disk->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) struct block_device *bdev = bdget_disk(disk, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) bdev->bd_openers == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) nbd_disconnect_and_put(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) bdput(bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) nbd_config_put(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) nbd_put(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
static const struct block_device_operations nbd_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) .open = nbd_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) .release = nbd_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) .ioctl = nbd_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) .compat_ioctl = nbd_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) #if IS_ENABLED(CONFIG_DEBUG_FS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) struct nbd_device *nbd = s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) if (nbd->task_recv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) return single_open(file, nbd_dbg_tasks_show, inode->i_private);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) static const struct file_operations nbd_dbg_tasks_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) .open = nbd_dbg_tasks_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) .read = seq_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) .llseek = seq_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) .release = single_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) struct nbd_device *nbd = s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) u32 flags = nbd->config->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) seq_printf(s, "Hex: 0x%08x\n\n", flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) seq_puts(s, "Known flags:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) if (flags & NBD_FLAG_HAS_FLAGS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) if (flags & NBD_FLAG_READ_ONLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) seq_puts(s, "NBD_FLAG_READ_ONLY\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) if (flags & NBD_FLAG_SEND_FLUSH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) if (flags & NBD_FLAG_SEND_FUA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) seq_puts(s, "NBD_FLAG_SEND_FUA\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) if (flags & NBD_FLAG_SEND_TRIM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) seq_puts(s, "NBD_FLAG_SEND_TRIM\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) return single_open(file, nbd_dbg_flags_show, inode->i_private);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) static const struct file_operations nbd_dbg_flags_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) .open = nbd_dbg_flags_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) .read = seq_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) .llseek = seq_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) .release = single_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) static int nbd_dev_dbg_init(struct nbd_device *nbd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) struct dentry *dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) struct nbd_config *config = nbd->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) if (!nbd_dbg_dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) if (!dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) nbd_name(nbd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) config->dbg_dir = dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) }
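
/*
 * Illustrative layout, assuming debugfs is mounted at the conventional
 * /sys/kernel/debug: the calls above give each device a tree like
 *
 *   /sys/kernel/debug/nbd/nbd0/tasks       (seq_file, 0444)
 *   /sys/kernel/debug/nbd/nbd0/flags       (seq_file, 0444)
 *   /sys/kernel/debug/nbd/nbd0/size_bytes  (u64, 0444)
 *   /sys/kernel/debug/nbd/nbd0/blocksize   (u64, 0444)
 *   /sys/kernel/debug/nbd/nbd0/timeout     (u32, 0444)
 */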
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) static void nbd_dev_dbg_close(struct nbd_device *nbd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) debugfs_remove_recursive(nbd->config->dbg_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) static int nbd_dbg_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) struct dentry *dbg_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) dbg_dir = debugfs_create_dir("nbd", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) if (!dbg_dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) nbd_dbg_dir = dbg_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) static void nbd_dbg_close(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) debugfs_remove_recursive(nbd_dbg_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) #else /* IS_ENABLED(CONFIG_DEBUG_FS) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) static int nbd_dev_dbg_init(struct nbd_device *nbd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) static void nbd_dev_dbg_close(struct nbd_device *nbd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) static int nbd_dbg_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) static void nbd_dbg_close(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
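/*
 * Called by blk-mq once for each request preallocated when the tag set
 * is allocated; the nbd_cmd lives in the per-request PDU area sized by
 * tag_set.cmd_size in nbd_dev_add(), so no per-I/O allocation is needed.
 */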
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) unsigned int hctx_idx, unsigned int numa_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) cmd->nbd = set->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) cmd->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) mutex_init(&cmd->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) static const struct blk_mq_ops nbd_mq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) .queue_rq = nbd_queue_rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) .complete = nbd_complete_rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) .init_request = nbd_init_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) .timeout = nbd_xmit_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) static int nbd_dev_add(int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) struct nbd_device *nbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) struct gendisk *disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) struct request_queue *q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) int err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) if (!nbd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) disk = alloc_disk(1 << part_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) if (!disk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) goto out_free_nbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) if (index >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) if (err == -ENOSPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) err = -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) if (err >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) index = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) goto out_free_disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) nbd->index = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) nbd->disk = disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) nbd->tag_set.ops = &nbd_mq_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) nbd->tag_set.nr_hw_queues = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) nbd->tag_set.queue_depth = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) nbd->tag_set.numa_node = NUMA_NO_NODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) BLK_MQ_F_BLOCKING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) nbd->tag_set.driver_data = nbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) nbd->destroy_complete = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) err = blk_mq_alloc_tag_set(&nbd->tag_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) goto out_free_idr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) q = blk_mq_init_queue(&nbd->tag_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) if (IS_ERR(q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) err = PTR_ERR(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) goto out_free_tags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) disk->queue = q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) * Tell the block layer that we are not a rotational device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) disk->queue->limits.discard_granularity = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) disk->queue->limits.discard_alignment = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) blk_queue_max_discard_sectors(disk->queue, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) blk_queue_max_segment_size(disk->queue, UINT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) blk_queue_max_segments(disk->queue, USHRT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) blk_queue_max_hw_sectors(disk->queue, 65536);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) disk->queue->limits.max_sectors = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) mutex_init(&nbd->config_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) refcount_set(&nbd->config_refs, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) refcount_set(&nbd->refs, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) INIT_LIST_HEAD(&nbd->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) disk->major = NBD_MAJOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) disk->first_minor = index << part_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) disk->fops = &nbd_fops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) disk->private_data = nbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) sprintf(disk->disk_name, "nbd%d", index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) add_disk(disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) nbd_total_devices++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) return index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) out_free_tags:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) blk_mq_free_tag_set(&nbd->tag_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) out_free_idr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) idr_remove(&nbd_index_idr, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) out_free_disk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) put_disk(disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) out_free_nbd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) kfree(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)
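/*
 * idr_for_each() callback: returning non-zero stops the walk, so this
 * reports through *found the first device whose config refcount is
 * zero, i.e. one that is allocated but not currently configured.
 */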
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) static int find_free_cb(int id, void *ptr, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) struct nbd_device *nbd = ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) struct nbd_device **found = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) if (!refcount_read(&nbd->config_refs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) *found = nbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) /* Netlink interface. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) [NBD_ATTR_INDEX] = { .type = NLA_U32 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) [NBD_ATTR_SIZE_BYTES] = { .type = NLA_U64 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) [NBD_ATTR_BLOCK_SIZE_BYTES] = { .type = NLA_U64 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) [NBD_ATTR_TIMEOUT] = { .type = NLA_U64 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) [NBD_ATTR_SERVER_FLAGS] = { .type = NLA_U64 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) [NBD_ATTR_CLIENT_FLAGS] = { .type = NLA_U64 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) [NBD_ATTR_SOCKETS] = { .type = NLA_NESTED},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) [NBD_ATTR_DEAD_CONN_TIMEOUT] = { .type = NLA_U64 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) [NBD_ATTR_DEVICE_LIST] = { .type = NLA_NESTED},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) [NBD_SOCK_FD] = { .type = NLA_U32 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) /* We don't use this right now since we don't parse the incoming list, but we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) * still want it here so userspace knows what to expect.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) static const struct nla_policy __attribute__((unused))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) [NBD_DEVICE_INDEX] = { .type = NLA_U32 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) [NBD_DEVICE_CONNECTED] = { .type = NLA_U8 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) struct nbd_config *config = nbd->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) u64 bsize = config->blksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) u64 bytes = config->bytesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) if (info->attrs[NBD_ATTR_SIZE_BYTES])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) if (!bsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) bsize = NBD_DEF_BLKSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) if (!nbd_is_valid_blksize(bsize)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) printk(KERN_ERR "Invalid block size %llu\n", bsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) if (bytes != config->bytesize || bsize != config->blksize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) nbd_size_set(nbd, bsize, div64_u64(bytes, bsize));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) }
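
/*
 * Worked example (values are illustrative): NBD_ATTR_SIZE_BYTES =
 * 1073741824 with NBD_ATTR_BLOCK_SIZE_BYTES = 4096 passes
 * nbd_is_valid_blksize() and, when either value differs from the
 * current config, ends up as
 * nbd_size_set(nbd, 4096, div64_u64(1073741824, 4096)), i.e. 262144
 * blocks of 4 KiB.
 */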
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) DECLARE_COMPLETION_ONSTACK(destroy_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) struct nbd_device *nbd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) struct nbd_config *config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) int index = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) bool put_dev = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) if (!netlink_capable(skb, CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) if (info->attrs[NBD_ATTR_INDEX])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) if (!info->attrs[NBD_ATTR_SOCKETS]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) printk(KERN_ERR "nbd: must specify at least one socket\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) if (!info->attrs[NBD_ATTR_SIZE_BYTES]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) printk(KERN_ERR "nbd: must specify a size in bytes for the device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) mutex_lock(&nbd_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) if (index == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) ret = idr_for_each(&nbd_index_idr, &find_free_cb, &nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) int new_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) new_index = nbd_dev_add(-1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) if (new_index < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) mutex_unlock(&nbd_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) printk(KERN_ERR "nbd: failed to add new device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) return new_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) nbd = idr_find(&nbd_index_idr, new_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) nbd = idr_find(&nbd_index_idr, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) if (!nbd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) ret = nbd_dev_add(index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) mutex_unlock(&nbd_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) printk(KERN_ERR "nbd: failed to add new device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) nbd = idr_find(&nbd_index_idr, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) if (!nbd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) printk(KERN_ERR "nbd: couldn't find device at index %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) mutex_unlock(&nbd_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) nbd->destroy_complete = &destroy_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) mutex_unlock(&nbd_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) /* Wait until the nbd device is totally destroyed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) wait_for_completion(&destroy_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) if (!refcount_inc_not_zero(&nbd->refs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) mutex_unlock(&nbd_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) if (index == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) printk(KERN_ERR "nbd: device at index %d is going down\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) mutex_unlock(&nbd_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) mutex_lock(&nbd->config_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) if (refcount_read(&nbd->config_refs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) mutex_unlock(&nbd->config_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) nbd_put(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) if (index == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) printk(KERN_ERR "nbd: nbd%d already in use\n", index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) if (WARN_ON(nbd->config)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) mutex_unlock(&nbd->config_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) nbd_put(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) config = nbd->config = nbd_alloc_config();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) if (!nbd->config) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) mutex_unlock(&nbd->config_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) nbd_put(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) printk(KERN_ERR "nbd: couldn't allocate config\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) refcount_set(&nbd->config_refs, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) set_bit(NBD_RT_BOUND, &config->runtime_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) ret = nbd_genl_size_set(info, nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) if (info->attrs[NBD_ATTR_TIMEOUT])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) nbd_set_cmd_timeout(nbd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) config->dead_conn_timeout =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) config->dead_conn_timeout *= HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) if (info->attrs[NBD_ATTR_SERVER_FLAGS])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) config->flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) * We have 1 ref to keep the device around, and then 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) * ref for our current operation here, which will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) * inherited by the config. With DESTROY_ON_DISCONNECT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) * the device must go away along with its config, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) * the long-held ref is dropped (put_dev). If the flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) * was already set, that ref was dropped earlier.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) &nbd->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) put_dev = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) &nbd->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) refcount_inc(&nbd->refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) &config->runtime_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) if (info->attrs[NBD_ATTR_SOCKETS]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) struct nlattr *attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) int rem, fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) rem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) struct nlattr *socks[NBD_SOCK_MAX+1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) if (nla_type(attr) != NBD_SOCK_ITEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) nbd_sock_policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) info->extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) printk(KERN_ERR "nbd: error processing sock list\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) if (!socks[NBD_SOCK_FD])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) ret = nbd_add_socket(nbd, fd, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) ret = nbd_start_device(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) mutex_unlock(&nbd->config_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) refcount_inc(&nbd->config_refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) nbd_connect_reply(info, nbd->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) nbd_config_put(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) if (put_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) nbd_put(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) }
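
/*
 * For reference, a minimal NBD_CMD_CONNECT message accepted by the
 * function above nests its attributes as follows (the fd value is
 * hypothetical and comes from a socket userspace has already
 * connected):
 *
 *   NBD_ATTR_INDEX       (u32, optional; omit to auto-select a device)
 *   NBD_ATTR_SIZE_BYTES  (u64, required)
 *   NBD_ATTR_SOCKETS     (nested, required)
 *     NBD_SOCK_ITEM      (nested)
 *       NBD_SOCK_FD      (u32, e.g. 7)
 */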
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) static void nbd_disconnect_and_put(struct nbd_device *nbd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) mutex_lock(&nbd->config_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) nbd_disconnect(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) sock_shutdown(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) * Make sure recv thread has finished, so it does not drop the last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) * config ref and try to destroy the workqueue from inside the work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) * queue. And this also ensures that we can safely call nbd_clear_que()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) * to cancel the inflight I/Os.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) if (nbd->recv_workq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) flush_workqueue(nbd->recv_workq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) nbd_clear_que(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) nbd->task_setup = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) mutex_unlock(&nbd->config_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) &nbd->config->runtime_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) nbd_config_put(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) }
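
/*
 * The config ref dropped at the end of nbd_disconnect_and_put() is the
 * extra one taken at connect time under NBD_RT_HAS_CONFIG_REF, so a
 * netlink-configured device gives up its long-lived config reference
 * exactly once per disconnect.
 */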
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) struct nbd_device *nbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) if (!netlink_capable(skb, CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) if (!info->attrs[NBD_ATTR_INDEX]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) printk(KERN_ERR "nbd: must specify an index to disconnect\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) mutex_lock(&nbd_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) nbd = idr_find(&nbd_index_idr, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) if (!nbd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) mutex_unlock(&nbd_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) printk(KERN_ERR "nbd: couldn't find device at index %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) if (!refcount_inc_not_zero(&nbd->refs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) mutex_unlock(&nbd_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) printk(KERN_ERR "nbd: device at index %d is going down\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) mutex_unlock(&nbd_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) if (!refcount_inc_not_zero(&nbd->config_refs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) nbd_put(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) nbd_disconnect_and_put(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) nbd_config_put(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) nbd_put(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) struct nbd_device *nbd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) struct nbd_config *config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) bool put_dev = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) if (!netlink_capable(skb, CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) if (!info->attrs[NBD_ATTR_INDEX]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) printk(KERN_ERR "nbd: must specify a device to reconfigure\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) mutex_lock(&nbd_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) nbd = idr_find(&nbd_index_idr, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) if (!nbd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) mutex_unlock(&nbd_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) printk(KERN_ERR "nbd: couldn't find a device at index %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) if (!refcount_inc_not_zero(&nbd->refs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) mutex_unlock(&nbd_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) printk(KERN_ERR "nbd: device at index %d is going down\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) mutex_unlock(&nbd_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) if (!refcount_inc_not_zero(&nbd->config_refs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) dev_err(nbd_to_dev(nbd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) "not configured, cannot reconfigure\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) nbd_put(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) mutex_lock(&nbd->config_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) config = nbd->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) !nbd->task_recv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) dev_err(nbd_to_dev(nbd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) "not configured, cannot reconfigure\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) ret = nbd_genl_size_set(info, nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) if (info->attrs[NBD_ATTR_TIMEOUT])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) nbd_set_cmd_timeout(nbd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) config->dead_conn_timeout =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) config->dead_conn_timeout *= HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) &nbd->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) put_dev = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) &nbd->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) refcount_inc(&nbd->refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) &config->runtime_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) clear_bit(NBD_RT_DISCONNECT_ON_CLOSE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) &config->runtime_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) if (info->attrs[NBD_ATTR_SOCKETS]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) struct nlattr *attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) int rem, fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) rem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) struct nlattr *socks[NBD_SOCK_MAX+1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) if (nla_type(attr) != NBD_SOCK_ITEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) nbd_sock_policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) info->extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) printk(KERN_ERR "nbd: error processing sock list\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) if (!socks[NBD_SOCK_FD])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) ret = nbd_reconnect_socket(nbd, fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) if (ret == -ENOSPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) dev_info(nbd_to_dev(nbd), "reconnected socket\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) mutex_unlock(&nbd->config_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) nbd_config_put(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) nbd_put(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) if (put_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) nbd_put(nbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) static const struct genl_small_ops nbd_connect_genl_ops[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) .cmd = NBD_CMD_CONNECT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) .doit = nbd_genl_connect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) .cmd = NBD_CMD_DISCONNECT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) .doit = nbd_genl_disconnect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) .cmd = NBD_CMD_RECONFIGURE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) .doit = nbd_genl_reconfigure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) .cmd = NBD_CMD_STATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) .doit = nbd_genl_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) static const struct genl_multicast_group nbd_mcast_grps[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) { .name = NBD_GENL_MCAST_GROUP_NAME, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) static struct genl_family nbd_genl_family __ro_after_init = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) .hdrsize = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) .name = NBD_GENL_FAMILY_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) .version = NBD_GENL_VERSION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) .module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) .small_ops = nbd_connect_genl_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) .n_small_ops = ARRAY_SIZE(nbd_connect_genl_ops),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) .maxattr = NBD_ATTR_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) .policy = nbd_attr_policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) .mcgrps = nbd_mcast_grps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) .n_mcgrps = ARRAY_SIZE(nbd_mcast_grps),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) };
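
/*
 * The family id is assigned dynamically by genl_register_family();
 * userspace is expected to resolve NBD_GENL_FAMILY_NAME through the
 * generic netlink controller (CTRL_CMD_GETFAMILY) rather than assume
 * a fixed id.
 */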
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) struct nlattr *dev_opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) u8 connected = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) /* This is a little racy, but for status it's ok. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) * reason we don't take a ref here is that we can't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) * take one in the index == -1 case, as we would then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) * need to put it under the nbd_index_mutex, which could
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) * deadlock if we are configured to remove ourselves
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) * once we're disconnected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) if (refcount_read(&nbd->config_refs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) connected = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) dev_opt = nla_nest_start_noflag(reply, NBD_DEVICE_ITEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) if (!dev_opt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) connected);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) nla_nest_end(reply, dev_opt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) }
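
/*
 * Each device thus contributes one NBD_DEVICE_ITEM nest holding a u32
 * NBD_DEVICE_INDEX and a u8 NBD_DEVICE_CONNECTED, the same per-device
 * shape that msg_size accounts for in nbd_genl_status() below.
 */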
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) static int status_cb(int id, void *ptr, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) struct nbd_device *nbd = ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) return populate_nbd_status(nbd, (struct sk_buff *)data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) struct nlattr *dev_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) struct sk_buff *reply;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) void *reply_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) size_t msg_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) int index = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) int ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) if (info->attrs[NBD_ATTR_INDEX])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) mutex_lock(&nbd_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) nla_attr_size(sizeof(u8)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) msg_size *= (index == -1) ? nbd_total_devices : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) reply = genlmsg_new(msg_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) if (!reply)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) NBD_CMD_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) if (!reply_head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) nlmsg_free(reply);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) if (index == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) nlmsg_free(reply);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) struct nbd_device *nbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) nbd = idr_find(&nbd_index_idr, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) if (nbd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) ret = populate_nbd_status(nbd, reply);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) nlmsg_free(reply);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) nla_nest_end(reply, dev_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) genlmsg_end(reply, reply_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) ret = genlmsg_reply(reply, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) mutex_unlock(&nbd_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) static void nbd_connect_reply(struct genl_info *info, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) void *msg_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) NBD_CMD_CONNECT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) if (!msg_head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) nlmsg_free(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) nlmsg_free(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) genlmsg_end(skb, msg_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) genlmsg_reply(skb, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) static void nbd_mcast_index(int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) void *msg_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) NBD_CMD_LINK_DEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) if (!msg_head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) nlmsg_free(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) nlmsg_free(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) genlmsg_end(skb, msg_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) }
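
/*
 * The multicast above goes to group 0 of the family, i.e. the
 * NBD_GENL_MCAST_GROUP_NAME group from nbd_mcast_grps[], so any
 * subscribed listener receives an NBD_CMD_LINK_DEAD notification
 * carrying the index of the device whose link died.
 */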
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) static void nbd_dead_link_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) struct link_dead_args *args = container_of(work, struct link_dead_args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) nbd_mcast_index(args->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) kfree(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) static int __init nbd_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) if (max_part < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) printk(KERN_ERR "nbd: max_part must be >= 0\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) part_shift = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) if (max_part > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) part_shift = fls(max_part);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) * Adjust max_part according to part_shift as it is exported
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) * to user space so that users can know the maximum number of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) * partitions the kernel is able to manage.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) * Note that -1 is required because partition 0 is reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) * for the whole disk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) max_part = (1UL << part_shift) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) }
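
/*
 * Example with module parameter max_part=15 (illustrative): fls(15)
 * is 4, so part_shift becomes 4 and max_part is rewritten to
 * (1UL << 4) - 1 == 15; each device then spans 16 minors, matching
 * alloc_disk(1 << part_shift) and first_minor = index << part_shift
 * in nbd_dev_add().
 */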

	/* The partition space per device must fit within DISK_MAX_PARTS. */
	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	/* All device/partition minors together must fit in MINORBITS bits. */
	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	if (register_blkdev(NBD_MAJOR, "nbd"))
		return -EIO;

	if (genl_register_family(&nbd_genl_family)) {
		unregister_blkdev(NBD_MAJOR, "nbd");
		return -EINVAL;
	}
	nbd_dbg_init();

	mutex_lock(&nbd_index_mutex);
	for (i = 0; i < nbds_max; i++)
		nbd_dev_add(i);
	mutex_unlock(&nbd_index_mutex);
	return 0;
}

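/*
 * idr_for_each() callback used at module exit: collects every device
 * onto the caller's list so the references can be dropped after
 * nbd_index_mutex has been released.
 */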
static int nbd_exit_cb(int id, void *ptr, void *data)
{
	struct list_head *list = (struct list_head *)data;
	struct nbd_device *nbd = ptr;

	list_add_tail(&nbd->list, list);
	return 0;
}

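/*
 * Module teardown: devices are first gathered onto a private list under
 * nbd_index_mutex and only then released, since dropping the last
 * reference via nbd_put() may itself need the mutex; a refcount other
 * than 1 at this point suggests a leaked device.
 */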
static void __exit nbd_cleanup(void)
{
	struct nbd_device *nbd;
	LIST_HEAD(del_list);

	nbd_dbg_close();

	mutex_lock(&nbd_index_mutex);
	idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
	mutex_unlock(&nbd_index_mutex);

	while (!list_empty(&del_list)) {
		nbd = list_first_entry(&del_list, struct nbd_device, list);
		list_del_init(&nbd->list);
		if (refcount_read(&nbd->refs) != 1)
			pr_err("nbd: possibly leaking a device\n");
		nbd_put(nbd);
	}

	idr_destroy(&nbd_index_idr);
	genl_unregister_family(&nbd_genl_family);
	unregister_blkdev(NBD_MAJOR, "nbd");
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");
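
/*
 * Illustrative usage (not part of the original source): loading the
 * module as
 *
 *	modprobe nbd nbds_max=4 max_part=8
 *
 * creates /dev/nbd0 through /dev/nbd3, each of which allows up to
 * (1 << fls(8)) - 1 = 15 partitions after the rounding in nbd_init().
 */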