/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * RDMA Network Block Driver
 *
 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
 */

#ifndef RNBD_CLT_H
#define RNBD_CLT_H

#include <linux/wait.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/blk-mq.h>
#include <linux/refcount.h>

#include <rtrs.h>
#include "rnbd-proto.h"
#include "rnbd-log.h"

/* Max. number of segments per IO request: Mellanox ConnectX ~ ConnectX-5
 * all support at least 30; minus 1 for the internal protocol, so 29.
 */
#define BMAX_SEGMENTS 29
/* time in seconds between reconnect tries, default to 30 s */
#define RECONNECT_DELAY 30
/*
 * Number of times to reconnect on error before giving up, 0 for disabled,
 * -1 for forever
 */
#define MAX_RECONNECTS -1
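/*
 * Client-side state of a mapped device: freshly allocated (INIT), mapped
 * and usable (MAPPED), mapped but with the transport currently down
 * (MAPPED_DISCONNECTED), or unmapped (UNMAPPED).
 */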
enum rnbd_clt_dev_state {
	DEV_STATE_INIT,
	DEV_STATE_MAPPED,
	DEV_STATE_MAPPED_DISCONNECTED,
	DEV_STATE_UNMAPPED,
};

struct rnbd_iu_comp {
	wait_queue_head_t wait;
	int errno;
};

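/*
 * An I/O unit carries either a block-layer request or a user message
 * buffer, together with the RTRS permit and scatterlist used to transfer
 * it and the completion state of the operation.
 */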
struct rnbd_iu {
	union {
		struct request *rq; /* for block io */
		void *buf; /* for user messages */
	};
	struct rtrs_permit *permit;
	union {
		/* used to send a msg associated with a dev */
		struct rnbd_clt_dev *dev;
		/* used to send a msg associated with a sess */
		struct rnbd_clt_session *sess;
	};
	struct scatterlist sglist[BMAX_SEGMENTS];
	struct work_struct work;
	int errno;
	struct rnbd_iu_comp comp;
	atomic_t refcount;
};

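/*
 * Per-CPU list used to collect queues that have to be requeued,
 * protected by requeue_lock.
 */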
struct rnbd_cpu_qlist {
	struct list_head requeue_list;
	spinlock_t requeue_lock;
	unsigned int cpu;
};

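/*
 * A client session groups all devices mapped over one RTRS connection to a
 * server; the devices on devs_list share its tag set, queue depth and
 * per-CPU requeue lists.
 */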
struct rnbd_clt_session {
	struct list_head list;
	struct rtrs_clt *rtrs;
	wait_queue_head_t rtrs_waitq;
	bool rtrs_ready;
	struct rnbd_cpu_qlist __percpu *cpu_queues;
	DECLARE_BITMAP(cpu_queues_bm, NR_CPUS);
	int __percpu *cpu_rr; /* per-cpu var for CPU round-robin */
	atomic_t busy;
	size_t queue_depth;
	u32 max_io_size;
	struct blk_mq_tag_set tag_set;
	struct mutex lock; /* protects state and devs_list */
	struct list_head devs_list; /* list of struct rnbd_clt_dev */
	refcount_t refcount;
	char sessname[NAME_MAX];
	u8 ver; /* protocol version */
};

/*
 * Submission queues.
 */
struct rnbd_queue {
	struct list_head requeue_list;
	unsigned long in_list;
	struct rnbd_clt_dev *dev;
	struct blk_mq_hw_ctx *hctx;
};

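/*
 * Client-side representation of one mapped remote block device: the gendisk
 * and request queue exposed locally, the block-queue limits used for it,
 * and the session the device belongs to.
 */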
struct rnbd_clt_dev {
	struct rnbd_clt_session *sess;
	struct request_queue *queue;
	struct rnbd_queue *hw_queues;
	u32 device_id;
	/* local IDR index - used to track minor number allocations. */
	u32 clt_device_id;
	struct mutex lock;
	enum rnbd_clt_dev_state dev_state;
	char *pathname;
	enum rnbd_access_mode access_mode;
	bool read_only;
	bool rotational;
	u32 max_hw_sectors;
	u32 max_write_same_sectors;
	u32 max_discard_sectors;
	u32 discard_granularity;
	u32 discard_alignment;
	u16 secure_discard;
	u16 physical_block_size;
	u16 logical_block_size;
	u16 max_segments;
	size_t nsectors;
	u64 size; /* device size in bytes */
	struct list_head list;
	struct gendisk *gd;
	struct kobject kobj;
	char *blk_symlink_name;
	refcount_t refcount;
	struct work_struct unmap_on_rmmod_work;
};

/* rnbd-clt.c */

struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
					 struct rtrs_addr *paths,
					 size_t path_cnt, u16 port_nr,
					 const char *pathname,
					 enum rnbd_access_mode access_mode);
int rnbd_clt_unmap_device(struct rnbd_clt_dev *dev, bool force,
			  const struct attribute *sysfs_self);

int rnbd_clt_remap_device(struct rnbd_clt_dev *dev);
int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, size_t newsize);
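
/*
 * Rough call sequence for these entry points (a sketch, not taken from the
 * driver itself): map a device over an established session, optionally
 * resize it, and finally unmap it.  It assumes the common kernel convention
 * that rnbd_clt_map_device() returns an ERR_PTR() on failure, and that
 * RNBD_ACCESS_RW is one of the rnbd_access_mode values from rnbd-proto.h;
 * the names "session0", paths, path_cnt, port_nr and new_nsectors below are
 * placeholders.  See the sysfs code for the authoritative callers.
 *
 *	struct rnbd_clt_dev *dev;
 *
 *	dev = rnbd_clt_map_device("session0", paths, path_cnt, port_nr,
 *				  "/dev/nullb0", RNBD_ACCESS_RW);
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 *
 *	rnbd_clt_resize_disk(dev, new_nsectors);
 *	rnbd_clt_unmap_device(dev, false, NULL);
 */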

/* rnbd-clt-sysfs.c */

int rnbd_clt_create_sysfs_files(void);

void rnbd_clt_destroy_sysfs_files(void);
void rnbd_clt_destroy_default_group(void);

void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev);

#endif /* RNBD_CLT_H */