// SPDX-License-Identifier: GPL-2.0-or-later
/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.


 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_state_change.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>

/* .doit */
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
/* .dumpit */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_devices_done(struct netlink_callback *cb);
int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_connections_done(struct netlink_callback *cb);
int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb);
int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb);

#include <linux/drbd_genl_api.h>
#include "drbd_nla.h"
#include <linux/genl_magic_func.h>

static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
static atomic_t notify_genl_seq = ATOMIC_INIT(2); /* two. */

DEFINE_MUTEX(notification_mutex);

/* Used with blkdev_get_by_path() to claim our meta data device(s). */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

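/* Finalize a prepared reply skb and send it back to the sender of @info. */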
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		pr_err("error sending genl reply\n");
}

/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: the only
 * reason it could fail is lack of space in the skb, and 4k are available. */
static int drbd_msg_put_info(struct sk_buff *skb, const char *info)
{
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;

	nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	}
	nla_nest_end(skb, nla);
	return 0;
}

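/* Like drbd_msg_put_info(), but takes a format string; the resulting info
 * text is truncated to the 256 bytes reserved below. */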
__printf(2, 3)
static int drbd_msg_sprintf_info(struct sk_buff *skb, const char *fmt, ...)
{
	va_list args;
	struct nlattr *nla, *txt;
	int err = -EMSGSIZE;
	int len;

	nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	txt = nla_reserve(skb, T_info_text, 256);
	if (!txt) {
		nla_nest_cancel(skb, nla);
		return err;
	}
	va_start(args, fmt);
	len = vscnprintf(nla_data(txt), 256, fmt, args);
	va_end(args);

	/* maybe: retry with larger reserve, if truncated */
	txt->nla_len = nla_attr_size(len + 1);
	nlmsg_trim(skb, (char *)txt + NLA_ALIGN(txt->nla_len));
	nla_nest_end(skb, nla);

	return 0;
}

/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 *
 * At this point, we still rely on the global genl_lock().
 * If we want to avoid that, and allow "genl_family.parallel_ops", we may need
 * to add additional synchronization against object destruction/modification.
 */
#define DRBD_ADM_NEED_MINOR 1
#define DRBD_ADM_NEED_RESOURCE 2
#define DRBD_ADM_NEED_CONNECTION 4
static int drbd_adm_prepare(struct drbd_config_context *adm_ctx,
	struct sk_buff *skb, struct genl_info *info, unsigned flags)
{
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(adm_ctx, 0, sizeof(*adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
		return -EPERM;

	adm_ctx->reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx->reply_skb) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx->reply_dh = genlmsg_put_reply(adm_ctx->reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* a put of a few bytes into a fresh skb of >= 4k will always succeed,
	 * but check anyway */
	if (!adm_ctx->reply_dh) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx->reply_dh->minor = d_in->minor;
	adm_ctx->reply_dh->ret_code = NO_ERROR;

	adm_ctx->volume = VOLUME_UNSPECIFIED;
	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx->reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		if (nla)
			adm_ctx->volume = nla_get_u32(nla);
		nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
		if (nla)
			adm_ctx->resource_name = nla_data(nla);
		adm_ctx->my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
		adm_ctx->peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
		if ((adm_ctx->my_addr &&
		     nla_len(adm_ctx->my_addr) > sizeof(adm_ctx->connection->my_addr)) ||
		    (adm_ctx->peer_addr &&
		     nla_len(adm_ctx->peer_addr) > sizeof(adm_ctx->connection->peer_addr))) {
			err = -EINVAL;
			goto fail;
		}
	}

	adm_ctx->minor = d_in->minor;
	adm_ctx->device = minor_to_device(d_in->minor);

	/* We are protected by the global genl_lock().
	 * But we may explicitly drop it/retake it in drbd_adm_set_role(),
	 * so make sure this object stays around. */
	if (adm_ctx->device)
		kref_get(&adm_ctx->device->kref);

	if (adm_ctx->resource_name) {
		adm_ctx->resource = drbd_find_resource(adm_ctx->resource_name);
	}

	if (!adm_ctx->device && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info(adm_ctx->reply_skb, "unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx->resource && (flags & DRBD_ADM_NEED_RESOURCE)) {
		drbd_msg_put_info(adm_ctx->reply_skb, "unknown resource");
		if (adm_ctx->resource_name)
			return ERR_RES_NOT_KNOWN;
		return ERR_INVALID_REQUEST;
	}

	if (flags & DRBD_ADM_NEED_CONNECTION) {
		if (adm_ctx->resource) {
			drbd_msg_put_info(adm_ctx->reply_skb, "no resource name expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx->device) {
			drbd_msg_put_info(adm_ctx->reply_skb, "no minor number expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx->my_addr && adm_ctx->peer_addr)
			adm_ctx->connection = conn_get_by_addrs(nla_data(adm_ctx->my_addr),
							nla_len(adm_ctx->my_addr),
							nla_data(adm_ctx->peer_addr),
							nla_len(adm_ctx->peer_addr));
		if (!adm_ctx->connection) {
			drbd_msg_put_info(adm_ctx->reply_skb, "unknown connection");
			return ERR_INVALID_REQUEST;
		}
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx->device && adm_ctx->resource &&
	    adm_ctx->device->resource != adm_ctx->resource) {
		pr_warn("request: minor=%u, resource=%s; but that minor belongs to resource %s\n",
			adm_ctx->minor, adm_ctx->resource->name,
			adm_ctx->device->resource->name);
		drbd_msg_put_info(adm_ctx->reply_skb, "minor exists in different resource");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx->device &&
	    adm_ctx->volume != VOLUME_UNSPECIFIED &&
	    adm_ctx->volume != adm_ctx->device->vnr) {
		pr_warn("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
			adm_ctx->minor, adm_ctx->volume,
			adm_ctx->device->vnr, adm_ctx->device->resource->name);
		drbd_msg_put_info(adm_ctx->reply_skb, "minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}

	/* still, provide adm_ctx->resource always, if possible. */
	if (!adm_ctx->resource) {
		adm_ctx->resource = adm_ctx->device ? adm_ctx->device->resource
			: adm_ctx->connection ? adm_ctx->connection->resource : NULL;
		if (adm_ctx->resource)
			kref_get(&adm_ctx->resource->kref);
	}

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx->reply_skb);
	adm_ctx->reply_skb = NULL;
	return err;
}

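/* Drop the references taken in drbd_adm_prepare() and, if a reply skb was
 * allocated, fill in @retcode and send the reply to the sender of @info. */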
static int drbd_adm_finish(struct drbd_config_context *adm_ctx,
	struct genl_info *info, int retcode)
{
	if (adm_ctx->device) {
		kref_put(&adm_ctx->device->kref, drbd_destroy_device);
		adm_ctx->device = NULL;
	}
	if (adm_ctx->connection) {
		kref_put(&adm_ctx->connection->kref, &drbd_destroy_connection);
		adm_ctx->connection = NULL;
	}
	if (adm_ctx->resource) {
		kref_put(&adm_ctx->resource->kref, drbd_destroy_resource);
		adm_ctx->resource = NULL;
	}

	if (!adm_ctx->reply_skb)
		return -ENOMEM;

	adm_ctx->reply_dh->ret_code = retcode;
	drbd_adm_send_reply(adm_ctx->reply_skb, info);
	return 0;
}

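/* Fill the preallocated envp[3] (DRBD_PEER_AF) and envp[4]
 * (DRBD_PEER_ADDRESS) slots from the connection's peer address.
 * Both slots are left empty while no addresses are configured yet. */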
static void setup_khelper_env(struct drbd_connection *connection, char **envp)
{
	char *afs;

	/* FIXME: A future version will not allow this case. */
	if (connection->my_addr_len == 0 || connection->peer_addr_len == 0)
		return;

	switch (((struct sockaddr *)&connection->peer_addr)->sa_family) {
	case AF_INET6:
		afs = "ipv6";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
			 &((struct sockaddr_in6 *)&connection->peer_addr)->sin6_addr);
		break;
	case AF_INET:
		afs = "ipv4";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
		break;
	default:
		afs = "ssocks";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
	}
	snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
}

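/**
 * drbd_khelper() - synchronously run the configured user mode helper
 * @device:	device the event refers to; passed to the helper as "minor-<n>"
 * @cmd:	name of the helper command to run
 *
 * Broadcasts SIB_HELPER_PRE/_POST events around the call.  Returns the raw
 * status from call_usermodehelper(); negative errnos are mapped to 0, so
 * callers only ever see an exit status.
 */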
int drbd_khelper(struct drbd_device *device, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char mb[14];
	char *argv[] = {drbd_usermode_helper, cmd, mb, NULL };
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct sib_info sib;
	int ret;

	if (current == connection->worker.task)
		set_bit(CALLBACK_PENDING, &connection->flags);

	snprintf(mb, 14, "minor-%d", device_to_minor(device));
	setup_khelper_env(connection, envp);
	/* The helper may take some time;
	 * write out any unsynced meta data changes now. */
	drbd_md_sync(device);

	drbd_info(device, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(device, &sib);
	notify_helper(NOTIFY_CALL, device, connection, cmd, 0);
	ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		drbd_warn(device, "helper command: %s %s %s exit code %u (0x%x)\n",
			  drbd_usermode_helper, cmd, mb,
			  (ret >> 8) & 0xff, ret);
	else
		drbd_info(device, "helper command: %s %s %s exit code %u (0x%x)\n",
			  drbd_usermode_helper, cmd, mb,
			  (ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(device, &sib);
	notify_helper(NOTIFY_RESPONSE, device, connection, cmd, ret);

	if (current == connection->worker.task)
		clear_bit(CALLBACK_PENDING, &connection->flags);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

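/* Like drbd_khelper(), but for connection wide events ("fence-peer" and
 * friends): the helper gets the resource name instead of "minor-<n>". */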
enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char *resource_name = connection->resource->name;
	char *argv[] = {drbd_usermode_helper, cmd, resource_name, NULL };
	int ret;

	setup_khelper_env(connection, envp);
	conn_md_sync(connection);

	drbd_info(connection, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, resource_name);
	/* TODO: conn_bcast_event() ?? */
	notify_helper(NOTIFY_CALL, NULL, connection, cmd, 0);

	ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		drbd_warn(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
			  drbd_usermode_helper, cmd, resource_name,
			  (ret >> 8) & 0xff, ret);
	else
		drbd_info(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
			  drbd_usermode_helper, cmd, resource_name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */
	notify_helper(NOTIFY_RESPONSE, NULL, connection, cmd, ret);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

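/* Return the strictest fencing policy configured on any volume of this
 * connection whose local disk is at least Consistent, or FP_NOT_AVAIL if
 * no such disk is currently attached. */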
static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connection)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;

		if (get_ldev_if_state(device, D_CONSISTENT)) {
			struct disk_conf *disk_conf =
				rcu_dereference(peer_device->device->ldev->disk_conf);
			fp = max_t(enum drbd_fencing_p, fp, disk_conf->fencing);
			put_ldev(device);
		}
	}
	rcu_read_unlock();

	return fp;
}

static bool resource_is_suspended(struct drbd_resource *resource)
{
	return resource->susp || resource->susp_fen || resource->susp_nod;
}

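/**
 * conn_try_outdate_peer() - run the fence-peer handler and apply its verdict
 * @connection: connection to the peer that should be fenced
 *
 * Returns true if the peer's disk state could be determined to be at most
 * Outdated, false if IO has to stay frozen for now.
 */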
bool conn_try_outdate_peer(struct drbd_connection *connection)
{
	struct drbd_resource * const resource = connection->resource;
	unsigned int connect_cnt;
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	spin_lock_irq(&resource->req_lock);
	if (connection->cstate >= C_WF_REPORT_PARAMS) {
		drbd_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n");
		spin_unlock_irq(&resource->req_lock);
		return false;
	}

	connect_cnt = connection->connect_cnt;
	spin_unlock_irq(&resource->req_lock);

	fp = highest_fencing_policy(connection);
	switch (fp) {
	case FP_NOT_AVAIL:
		drbd_warn(connection, "Not fencing peer, I'm not even Consistent myself.\n");
		spin_lock_irq(&resource->req_lock);
		if (connection->cstate < C_WF_REPORT_PARAMS) {
			_conn_request_state(connection,
					    (union drbd_state) { { .susp_fen = 1 } },
					    (union drbd_state) { { .susp_fen = 0 } },
					    CS_VERBOSE | CS_HARD | CS_DC_SUSP);
			/* We are no longer suspended due to the fencing policy.
			 * We may still be suspended due to the on-no-data-accessible policy.
			 * If that was OND_IO_ERROR, fail pending requests. */
			if (!resource_is_suspended(resource))
				_tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
		}
		/* Else: in case we raced with a connection handshake,
		 * let the handshake figure out if we maybe can RESEND,
		 * and do not resume/fail pending requests here.
		 * Worst case is we stay suspended for now, which may be
		 * resolved by either re-establishing the replication link, or
		 * the next link failure, or eventually the administrator. */
		spin_unlock_irq(&resource->req_lock);
		return false;

	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(connection, "fence-peer");

	switch ((r>>8) & 0xff) {
	case P_INCONSISTENT: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case P_OUTDATED: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case P_DOWN: /* peer was down */
		if (conn_highest_disk(connection) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		}
		break;
	case P_PRIMARY: /* Peer is primary, voluntarily outdate myself.
			 * This is useful when an unconnected R_SECONDARY is asked to
			 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		drbd_warn(connection, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case P_FENCING:
		/* THINK: do we need to handle this
		 * like case 4, or more like case 5? */
		if (fp != FP_STONITH)
			drbd_err(connection, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		drbd_err(connection, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	drbd_info(connection, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

	/* Not using
	   conn_request_state(connection, mask, val, CS_VERBOSE);
	   here, because we might have been able to re-establish the connection
	   in the meantime. */
	spin_lock_irq(&resource->req_lock);
	if (connection->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &connection->flags)) {
		if (connection->connect_cnt != connect_cnt)
			/* In case the connection was established and dropped
			   while the fence-peer handler was running, ignore it */
			drbd_info(connection, "Ignoring fence-peer exit code\n");
		else
			_conn_request_state(connection, mask, val, CS_VERBOSE);
	}
	spin_unlock_irq(&resource->req_lock);

	return conn_highest_pdsk(connection) <= D_OUTDATED;
}

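/* kthread entry point; drops the kref taken in conn_try_outdate_peer_async(). */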
static int _try_outdate_peer_async(void *data)
{
	struct drbd_connection *connection = data;

	conn_try_outdate_peer(connection);

	kref_put(&connection->kref, drbd_destroy_connection);
	return 0;
}

void conn_try_outdate_peer_async(struct drbd_connection *connection)
{
	struct task_struct *opa;

	kref_get(&connection->kref);
	/* We may have just sent a signal to this thread
	 * to get it out of some blocking network function.
	 * Clear signals; otherwise kthread_run(), which internally uses
	 * wait_on_completion_killable(), will mistake our pending signal
	 * for a new fatal signal and fail. */
	flush_signals(current);
	opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
	if (IS_ERR(opa)) {
		drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
		kref_put(&connection->kref, drbd_destroy_connection);
	}
}

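/**
 * drbd_set_role() - request a role change, retrying with relaxed constraints
 * @device:	device to change the role of
 * @new_role:	R_PRIMARY or R_SECONDARY
 * @force:	if nonzero, allow promotion without an UpToDate disk
 *
 * Retries the state change up to four times, each time adjusting the
 * requested state according to the failure reason: outdating the peer,
 * forcing the local disk UpToDate, or waiting for the peer to be
 * declared dead.
 */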
enum drbd_state_rv
drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int force)
{
	struct drbd_peer_device *const peer_device = first_peer_device(device);
	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	struct net_conf *nc;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY) {
		struct drbd_connection *connection;

		/* Detect dead peers as soon as possible. */

		rcu_read_lock();
		for_each_connection(connection, device->resource)
			request_ping(connection);
		rcu_read_unlock();
	}

	mutex_lock(device->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i = 0; val.role = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state_holding_state_mutex(device, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (device->state.disk < D_UP_TO_DATE &&
		     device->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    device->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(device, device->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(connection)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(connection) && force) {
				drbd_warn(device, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk = D_OUTDATED;
			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			if (try < max_tries) {
				int timeo;

				try = max_tries - 1;
				rcu_read_lock();
				nc = rcu_dereference(connection->net_conf);
				timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
				rcu_read_unlock();
				schedule_timeout_interruptible(timeo);
			}
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(device, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		drbd_warn(device, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(device->misc_wait, atomic_read(&device->ap_pending_cnt) == 0);

	/* FIXME also wait for all pending P_BARRIER_ACK? */

	if (new_role == R_SECONDARY) {
		if (get_ldev(device)) {
			device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(device);
		}
	} else {
		mutex_lock(&device->resource->conf_update);
		nc = connection->net_conf;
		if (nc)
			nc->discard_my_data = 0; /* without copy; single bit op is atomic */
		mutex_unlock(&device->resource->conf_update);

		if (get_ldev(device)) {
			if (((device->state.conn < C_CONNECTED ||
			      device->state.pdsk <= D_FAILED)
			     && device->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(device);

			device->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(device);
		}
	}

	/* Writeout of activity-log-covered areas of the bitmap to stable
	 * storage has already been done in the after-state-change work. */

	if (device->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(peer_device);
		drbd_send_current_state(peer_device);
	}

	drbd_md_sync(device);
	set_disk_ro(device->vdisk, new_role == R_SECONDARY);
	kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(device->state_mutex);
	return rv;
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) static const char *from_attrs_err_to_txt(int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) return err == -ENOMSG ? "required attribute missing" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) err == -EOPNOTSUPP ? "unknown mandatory attribute" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) err == -EEXIST ? "can not change invariant setting" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) "invalid attribute value";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) struct drbd_config_context adm_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) struct set_role_parms parms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) enum drbd_ret_code retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) if (!adm_ctx.reply_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) return retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) memset(&parms, 0, sizeof(parms));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) err = set_role_parms_from_attrs(&parms, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) retcode = ERR_MANDATORY_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) genl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) mutex_lock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) retcode = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) retcode = drbd_set_role(adm_ctx.device, R_SECONDARY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) mutex_unlock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) genl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) drbd_adm_finish(&adm_ctx, info, retcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) /* Initializes the md.*_offset members, so we are able to find
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)  * the on-disk meta data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) * We currently have two possible layouts:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) * external:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) * |----------- md_size_sect ------------------|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) * [ 4k superblock ][ activity log ][ Bitmap ]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) * | al_offset == 8 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) * | bm_offset = al_offset + X |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * ==> bitmap sectors = md_size_sect - bm_offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) * internal:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * |----------- md_size_sect ------------------|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) * [data.....][ Bitmap ][ activity log ][ 4k superblock ]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) * | al_offset < 0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) * | bm_offset = al_offset - Y |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) * ==> bitmap sectors = Y = al_offset - bm_offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)  * Activity log size used to be fixed at 32 kB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) * but is about to become configurable.
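 *
 * Illustrative example (assumed numbers, ignoring alignment): internal
 * meta data with the default 32 kB activity log (al_size_4k == 8, i.e.
 * al_size_sect == 64) on a 1 TiB backing device: the bitmap tracks
 * 1 bit per 4 kiB, i.e. ~32 MiB == 65536 sectors, so
 * md_size_sect ~= 65536 + 64 + 8, al_offset == -64,
 * and bm_offset == -md_size_sect + 8.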
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) static void drbd_md_set_sector_offsets(struct drbd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) struct drbd_backing_dev *bdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) sector_t md_size_sect = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) unsigned int al_size_sect = bdev->md.al_size_4k * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) bdev->md.md_offset = drbd_md_ss(bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) switch (bdev->md.meta_dev_idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) /* v07 style fixed size indexed meta data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) bdev->md.md_size_sect = MD_128MB_SECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) bdev->md.al_offset = MD_4kB_SECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) case DRBD_MD_INDEX_FLEX_EXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) /* just occupy the full device; unit: sectors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) bdev->md.al_offset = MD_4kB_SECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) case DRBD_MD_INDEX_INTERNAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) case DRBD_MD_INDEX_FLEX_INT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) /* al size is still fixed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) bdev->md.al_offset = -al_size_sect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) 		/* we need (slightly less than) ~ this many bitmap sectors: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) md_size_sect = drbd_get_capacity(bdev->backing_bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) md_size_sect = BM_SECT_TO_EXT(md_size_sect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) md_size_sect = ALIGN(md_size_sect, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) /* plus the "drbd meta data super block",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * and the activity log; */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) md_size_sect += MD_4kB_SECT + al_size_sect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) bdev->md.md_size_sect = md_size_sect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) /* bitmap offset is adjusted by 'super' block size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) bdev->md.bm_offset = -md_size_sect + MD_4kB_SECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) /* input size is expected to be in KB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) char *ppsize(char *buf, unsigned long long size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) /* Needs 9 bytes at max including trailing NUL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * -1ULL ==> "16384 EB" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) int base = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) while (size >= 10000 && base < sizeof(units)-1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) /* shift + round */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) size = (size >> 10) + !!(size & (1<<9));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) base++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) sprintf(buf, "%u %cB", (unsigned)size, units[base]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) return buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
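
/* For illustration: ppsize(buf, 1ULL << 20) (i.e. 1 GiB expressed in KB)
 * shifts once (1048576 >= 10000) and yields "1024 MB"; -1ULL shifts down
 * to base 'E' and yields the "16384 EB" mentioned above. */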
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) /* there is still a theoretical deadlock when called from the receiver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)  * on a D_INCONSISTENT R_PRIMARY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) * remote READ does inc_ap_bio, receiver would need to receive answer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * packet from remote to dec_ap_bio again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) * receiver receive_sizes(), comes here,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) * waits for ap_bio_cnt == 0. -> deadlock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) * but this cannot happen, actually, because:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * (not connected, or bad/no disk on peer):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) * see drbd_fail_request_early, ap_bio_cnt is zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * peer may not initiate a resize.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) /* Note these are not to be confused with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) * drbd_adm_suspend_io/drbd_adm_resume_io,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) * which are (sub) state changes triggered by admin (drbdsetup),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) * and can be long lived.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)  * This changes a device->flag, is triggered by drbd internals,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * and should be short-lived. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) /* It needs to be a counter, since multiple threads might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) independently suspend and resume IO. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) void drbd_suspend_io(struct drbd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) atomic_inc(&device->suspend_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) if (drbd_suspended(device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) wait_event(device->misc_wait, !atomic_read(&device->ap_bio_cnt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) void drbd_resume_io(struct drbd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) if (atomic_dec_and_test(&device->suspend_cnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) wake_up(&device->misc_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
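
/* Illustrative pairing, as used by drbd_determine_dev_size() below:
 *
 *	drbd_suspend_io(device);	(may wait until ap_bio_cnt drains)
 *	... rewrite or relocate meta data ...
 *	drbd_resume_io(device);		(wakes misc_wait when the count hits 0)
 *
 * Because suspend_cnt is a counter, nested or concurrent users compose. */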
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * drbd_determine_dev_size() - Sets the right device size obeying all constraints
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * @device: DRBD device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)  * Returns an enum determine_dev_size; negative values indicate errors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * You should call drbd_md_sync() after calling this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) enum determine_dev_size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct resize_parms *rs) __must_hold(local)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) struct md_offsets_and_sizes {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) u64 last_agreed_sect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) u64 md_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) s32 al_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) s32 bm_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) u32 md_size_sect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) u32 al_stripes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) u32 al_stripe_size_4k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) } prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) sector_t u_size, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) struct drbd_md *md = &device->ldev->md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) void *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) int md_moved, la_size_changed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) enum determine_dev_size rv = DS_UNCHANGED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) /* We may change the on-disk offsets of our meta data below. Lock out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) * anything that may cause meta data IO, to avoid acting on incomplete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) * layout changes or scribbling over meta data that is in the process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) * of being moved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) 	 * "Move" is not exactly correct, btw: currently we hold all our meta
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) 	 * data in core memory; to "move" it we just write it all out again,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) 	 * there are no reads. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) drbd_suspend_io(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) buffer = drbd_md_get_buffer(device, __func__); /* Lock meta-data IO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (!buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) drbd_resume_io(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) return DS_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) /* remember current offset and sizes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) prev.last_agreed_sect = md->la_size_sect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) prev.md_offset = md->md_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) prev.al_offset = md->al_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) prev.bm_offset = md->bm_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) prev.md_size_sect = md->md_size_sect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) prev.al_stripes = md->al_stripes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) prev.al_stripe_size_4k = md->al_stripe_size_4k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) if (rs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) 		/* rs is non-NULL if we should change the AL layout only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) md->al_stripes = rs->al_stripes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) md->al_stripe_size_4k = rs->al_stripe_size / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) md->al_size_4k = (u64)rs->al_stripes * rs->al_stripe_size / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) drbd_md_set_sector_offsets(device, device->ldev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) size = drbd_new_dev_size(device, device->ldev, u_size, flags & DDSF_FORCED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (size < prev.last_agreed_sect) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (rs && u_size == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) /* Remove "rs &&" later. This check should always be active, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) right now the receiver expects the permissive behavior */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) drbd_warn(device, "Implicit shrink not allowed. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) "Use --size=%llus for explicit shrink.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) (unsigned long long)size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) rv = DS_ERROR_SHRINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (u_size > size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) rv = DS_ERROR_SPACE_MD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) if (rv != DS_UNCHANGED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) if (get_capacity(device->vdisk) != size ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) drbd_bm_capacity(device) != size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) err = drbd_bm_resize(device, size, !(flags & DDSF_NO_RESYNC));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) /* currently there is only one error: ENOMEM! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) size = drbd_bm_capacity(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) if (size == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) drbd_err(device, "OUT OF MEMORY! "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) "Could not allocate bitmap!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) drbd_err(device, "BM resizing failed. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) "Leaving size unchanged\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) rv = DS_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) /* racy, see comments above. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) drbd_set_my_capacity(device, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) md->la_size_sect = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) if (rv <= DS_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) la_size_changed = (prev.last_agreed_sect != md->la_size_sect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) md_moved = prev.md_offset != md->md_offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) || prev.md_size_sect != md->md_size_sect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (la_size_changed || md_moved || rs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) u32 prev_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) /* We do some synchronous IO below, which may take some time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		 * Clear the timer to avoid scary "timer expired!" messages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		 * the "superblock" is written out at least twice below anyway. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) del_timer(&device->md_sync_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) /* We won't change the "al-extents" setting, we just may need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * to move the on-disk location of the activity log ringbuffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) * Lock for transaction is good enough, it may well be "dirty"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) * or even "starving". */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) wait_event(device->al_wait, lc_try_lock_for_transaction(device->act_log));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) /* mark current on-disk bitmap and activity log as unreliable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) prev_flags = md->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) md->flags |= MDF_FULL_SYNC | MDF_AL_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) drbd_md_write(device, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) drbd_al_initialize(device, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) drbd_info(device, "Writing the whole bitmap, %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) la_size_changed && md_moved ? "size changed and md moved" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) la_size_changed ? "size changed" : "md moved");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) drbd_bitmap_io(device, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) "size changed", BM_LOCKED_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) /* on-disk bitmap and activity log is authoritative again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) * (unless there was an IO error meanwhile...) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) md->flags = prev_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) drbd_md_write(device, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (rs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) drbd_info(device, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) md->al_stripes, md->al_stripe_size_4k * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if (size > prev.last_agreed_sect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) rv = prev.last_agreed_sect ? DS_GREW : DS_GREW_FROM_ZERO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (size < prev.last_agreed_sect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) rv = DS_SHRUNK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
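	/* Only reachable via the "goto err_out" statements above; in the
	 * normal flow, "if (0)" skips this recovery block. */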
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) if (0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) /* restore previous offset and sizes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) md->la_size_sect = prev.last_agreed_sect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) md->md_offset = prev.md_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) md->al_offset = prev.al_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) md->bm_offset = prev.bm_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) md->md_size_sect = prev.md_size_sect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) md->al_stripes = prev.al_stripes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) md->al_stripe_size_4k = prev.al_stripe_size_4k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) md->al_size_4k = (u64)prev.al_stripes * prev.al_stripe_size_4k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) lc_unlock(device->act_log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) wake_up(&device->al_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) drbd_md_put_buffer(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) drbd_resume_io(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) sector_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) drbd_new_dev_size(struct drbd_device *device, struct drbd_backing_dev *bdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) sector_t u_size, int assume_peer_has_space)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) sector_t p_size = device->p_size; /* partner's disk size. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) sector_t m_size; /* my size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) sector_t size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) m_size = drbd_get_max_capacity(bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) if (device->state.conn < C_CONNECTED && assume_peer_has_space) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) drbd_warn(device, "Resize while not connected was forced by the user!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) p_size = m_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (p_size && m_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) size = min_t(sector_t, p_size, m_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) if (la_size_sect) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) size = la_size_sect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) if (m_size && m_size < size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) size = m_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) if (p_size && p_size < size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) size = p_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) if (m_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) size = m_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) if (p_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) size = p_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (size == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) drbd_err(device, "Both nodes diskless!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) if (u_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) if (u_size > size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) drbd_err(device, "Requested disk size is too big (%lu > %lu)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) (unsigned long)u_size>>1, (unsigned long)size>>1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) size = u_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
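
/* For illustration (all in sectors): with m_size == 200 and p_size == 100
 * the result is min(200, 100) == 100; if the peer size is unknown we fall
 * back to the last agreed size (if any), clamped by the sizes we do know;
 * an explicit u_size may then shrink the result further. */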
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) * drbd_check_al_size() - Ensures that the AL is of the right size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) * @device: DRBD device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)  * Returns -EBUSY if the current AL LRU is still in use, -ENOMEM when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)  * allocation failed, and 0 on success. You should call drbd_md_sync()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)  * after calling this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) struct lru_cache *n, *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) struct lc_element *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) unsigned int in_use;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) if (device->act_log &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) device->act_log->nr_elements == dc->al_extents)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) in_use = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) t = device->act_log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) dc->al_extents, sizeof(struct lc_element), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) if (n == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) drbd_err(device, "Cannot allocate act_log lru!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
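	/* The old LRU ("t") may only be destroyed if none of its extents is
	 * still referenced; scan the refcnts under the lock before deciding
	 * whether to swap in the new one ("n"). */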
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) spin_lock_irq(&device->al_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) if (t) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) for (i = 0; i < t->nr_elements; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) e = lc_element_by_index(t, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) if (e->refcnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) drbd_err(device, "refcnt(%d)==%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) e->lc_number, e->refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) in_use += e->refcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) if (!in_use)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) device->act_log = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) spin_unlock_irq(&device->al_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (in_use) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) drbd_err(device, "Activity log still in use!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) lc_destroy(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) lc_destroy(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	drbd_md_mark_dirty(device); /* we changed device->act_log->nr_elements */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) static void blk_queue_discard_granularity(struct request_queue *q, unsigned int granularity)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) q->limits.discard_granularity = granularity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) static unsigned int drbd_max_discard_sectors(struct drbd_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) /* when we introduced REQ_WRITE_SAME support, we also bumped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) * our maximum supported batch bio size used for discards. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) if (connection->agreed_features & DRBD_FF_WSAME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) return DRBD_MAX_BBIO_SECTORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) /* before, with DRBD <= 8.4.6, we only allowed up to one AL_EXTENT_SIZE. */
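	/* AL_EXTENT_SIZE is 4 MiB, so this amounts to 8192 sectors. */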
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) return AL_EXTENT_SIZE >> 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) static void decide_on_discard_support(struct drbd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) struct request_queue *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) struct request_queue *b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) bool discard_zeroes_if_aligned)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) /* q = drbd device queue (device->rq_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) * b = backing device queue (device->ldev->backing_bdev->bd_disk->queue),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) * or NULL if diskless
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) struct drbd_connection *connection = first_peer_device(device)->connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) bool can_do = b ? blk_queue_discard(b) : true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_TRIM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) can_do = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) drbd_info(connection, "peer DRBD too old, does not support TRIM: disabling discards\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) if (can_do) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		/* We don't really care about the granularity.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) * Stacking limits below should fix it for the local
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) * device. Whether or not it is a suitable granularity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) * on the remote device is not our problem, really. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) * you care, you need to use devices with similar
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) * topology on all peers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) blk_queue_discard_granularity(q, 512);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) q->limits.max_discard_sectors = drbd_max_discard_sectors(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) q->limits.max_write_zeroes_sectors = drbd_max_discard_sectors(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) blk_queue_discard_granularity(q, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) q->limits.max_discard_sectors = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) q->limits.max_write_zeroes_sectors = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) static void fixup_discard_if_not_supported(struct request_queue *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) /* To avoid confusion, if this queue does not support discard, clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) * max_discard_sectors, which is what lsblk -D reports to the user.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) * Older kernels got this wrong in "stack limits".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) if (!blk_queue_discard(q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) blk_queue_max_discard_sectors(q, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) blk_queue_discard_granularity(q, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) /* Fixup max_write_zeroes_sectors after blk_stack_limits():
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) * if we can handle "zeroes" efficiently on the protocol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) * we want to do that, even if our backend does not announce
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) * max_write_zeroes_sectors itself. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) struct drbd_connection *connection = first_peer_device(device)->connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) /* If the peer announces WZEROES support, use it. Otherwise, rather
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) * send explicit zeroes than rely on some discard-zeroes-data magic. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) if (connection->agreed_features & DRBD_FF_WZEROES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) q->limits.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) q->limits.max_write_zeroes_sectors = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) static void decide_on_write_same_support(struct drbd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) struct request_queue *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) struct request_queue *b, struct o_qlim *o,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) bool disable_write_same)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) struct drbd_peer_device *peer_device = first_peer_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) struct drbd_connection *connection = peer_device->connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) bool can_do = b ? b->limits.max_write_same_sectors : true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) if (can_do && disable_write_same) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) can_do = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) drbd_info(peer_device, "WRITE_SAME disabled by config\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_WSAME)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) can_do = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) drbd_info(peer_device, "peer does not support WRITE_SAME\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) if (o) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) /* logical block size; queue_logical_block_size(NULL) is 512 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) unsigned int peer_lbs = be32_to_cpu(o->logical_block_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) unsigned int me_lbs_b = queue_logical_block_size(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) unsigned int me_lbs = queue_logical_block_size(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) if (me_lbs_b != me_lbs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) drbd_warn(device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) "logical block size of local backend does not match (drbd:%u, backend:%u); was this a late attach?\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) me_lbs, me_lbs_b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) /* rather disable write same than trigger some BUG_ON later in the scsi layer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) can_do = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) if (me_lbs_b != peer_lbs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) drbd_warn(peer_device, "logical block sizes do not match (me:%u, peer:%u); this may cause problems.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) me_lbs, peer_lbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) if (can_do) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) drbd_dbg(peer_device, "logical block size mismatch: WRITE_SAME disabled.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) can_do = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) me_lbs = max(me_lbs, me_lbs_b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) /* We cannot change the logical block size of an in-use queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) * We can only hope that access happens to be properly aligned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) * If not, the peer will likely produce an IO error, and detach. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) if (peer_lbs > me_lbs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) if (device->state.role != R_PRIMARY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) blk_queue_logical_block_size(q, peer_lbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) drbd_warn(peer_device, "logical block size set to %u\n", peer_lbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) drbd_warn(peer_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) "current Primary must NOT adjust logical block size (%u -> %u); hope for the best.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) me_lbs, peer_lbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) if (can_do && !o->write_same_capable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) /* If we introduce an open-coded write-same loop on the receiving side,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) * the peer would present itself as "capable". */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) drbd_dbg(peer_device, "WRITE_SAME disabled (peer device not capable)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) can_do = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) blk_queue_max_write_same_sectors(q, can_do ? DRBD_MAX_BBIO_SECTORS : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) unsigned int max_bio_size, struct o_qlim *o)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) struct request_queue * const q = device->rq_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) unsigned int max_hw_sectors = max_bio_size >> 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) unsigned int max_segments = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) struct request_queue *b = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) struct disk_conf *dc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) bool discard_zeroes_if_aligned = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) bool disable_write_same = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) if (bdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) b = bdev->backing_bdev->bd_disk->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) dc = rcu_dereference(device->ldev->disk_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) max_segments = dc->max_bio_bvecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) discard_zeroes_if_aligned = dc->discard_zeroes_if_aligned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) disable_write_same = dc->disable_write_same;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) blk_set_stacking_limits(&q->limits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) blk_queue_max_hw_sectors(q, max_hw_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) /* This is the workaround for "bio would need to, but cannot, be split" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) blk_queue_segment_boundary(q, PAGE_SIZE-1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) decide_on_discard_support(device, q, b, discard_zeroes_if_aligned);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) decide_on_write_same_support(device, q, b, o, disable_write_same);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) if (b) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) blk_stack_limits(&q->limits, &b->limits, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) blk_queue_update_readahead(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) fixup_discard_if_not_supported(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) fixup_write_zeroes(device, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) void drbd_reconsider_queue_parameters(struct drbd_device *device, struct drbd_backing_dev *bdev, struct o_qlim *o)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) unsigned int now, new, local, peer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) now = queue_max_hw_sectors(device->rq_queue) << 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	local = device->local_max_bio_size; /* Possibly stale last known value, from volatile memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	peer = device->peer_max_bio_size; /* Possibly stale last known value, from meta data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) if (bdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) local = queue_max_hw_sectors(bdev->backing_bdev->bd_disk->queue) << 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) device->local_max_bio_size = local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) local = min(local, DRBD_MAX_BIO_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	/* We may ignore peer limits if the peer is modern enough:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	   since 8.3.8 the peer can use multiple BIOs for a single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	   peer_request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) if (device->state.conn >= C_WF_REPORT_PARAMS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) if (first_peer_device(device)->connection->agreed_pro_version < 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) else if (first_peer_device(device)->connection->agreed_pro_version == 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) peer = DRBD_MAX_SIZE_H80_PACKET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) else if (first_peer_device(device)->connection->agreed_pro_version < 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) peer = DRBD_MAX_BIO_SIZE_P95; /* drbd 8.3.8 onwards, before 8.4.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) peer = DRBD_MAX_BIO_SIZE;
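		/* With the usual constants: protocol <= 94 peers end up capped
		 * at the 32 KiB H80 packet payload, 95..99 at 128 KiB, and
		 * protocol 100 and newer at DRBD_MAX_BIO_SIZE (1 MiB). */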
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) /* We may later detach and re-attach on a disconnected Primary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		 * Avoid having this setting jump back in that case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) * We want to store what we know the peer DRBD can handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) * not what the peer IO backend can handle. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) if (peer > device->peer_max_bio_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) device->peer_max_bio_size = peer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) new = min(local, peer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (device->state.role == R_PRIMARY && new < now)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) drbd_err(device, "ASSERT FAILED new < now; (%u < %u)\n", new, now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) if (new != now)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) drbd_info(device, "max BIO size = %u\n", new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) drbd_setup_queue_param(device, bdev, new, o);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) /* Starts the worker thread */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) static void conn_reconfig_start(struct drbd_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) drbd_thread_start(&connection->worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) drbd_flush_workqueue(&connection->sender_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) /* If still unconfigured, stop the worker again. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) static void conn_reconfig_done(struct drbd_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) bool stop_threads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) spin_lock_irq(&connection->resource->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) stop_threads = conn_all_vols_unconf(connection) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) connection->cstate == C_STANDALONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) spin_unlock_irq(&connection->resource->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) if (stop_threads) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) /* ack_receiver thread and ack_sender workqueue are implicitly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) * stopped by receiver in conn_disconnect() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) drbd_thread_stop(&connection->receiver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) drbd_thread_stop(&connection->worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) /* Make sure IO is suspended before calling this function. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) static void drbd_suspend_al(struct drbd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) int s = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) if (!lc_try_lock(device->act_log)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) drbd_warn(device, "Failed to lock al in drbd_suspend_al()\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) drbd_al_shrink(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) spin_lock_irq(&device->resource->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) if (device->state.conn < C_CONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) s = !test_and_set_bit(AL_SUSPENDED, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) spin_unlock_irq(&device->resource->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) lc_unlock(device->act_log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) if (s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) drbd_info(device, "Suspended AL updates\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) static bool should_set_defaults(struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	unsigned int flags = ((struct drbd_genlmsghdr *)info->userhdr)->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) /* This is limited by 16 bit "slot" numbers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) * and by available on-disk context storage.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) * Also (u16)~0 is special (denotes a "free" extent).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) * One transaction occupies one 4kB on-disk block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) * we have n such blocks in the on disk ring buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	 * the "current" transaction may fail (leaving n-1 usable),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	 * and there are 919 slot numbers of context information per transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) * 72 transaction blocks amounts to more than 2**16 context slots,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) * so cap there first.
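	 *
	 * For illustration: with the default 32 kB AL (al_size_4k == 8),
	 * this function returns (8 - 1) * 919 == 6433.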
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) const unsigned int max_al_nr = DRBD_AL_EXTENTS_MAX;
	const unsigned int sufficient_on_disk =
		(max_al_nr + AL_CONTEXT_PER_TRANSACTION - 1)
		/ AL_CONTEXT_PER_TRANSACTION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) unsigned int al_size_4k = bdev->md.al_size_4k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) if (al_size_4k > sufficient_on_disk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) return max_al_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
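/* Any change to the barrier/flush/drain settings requires re-evaluating
 * the write ordering method, see drbd_bump_write_ordering(). */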
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) static bool write_ordering_changed(struct disk_conf *a, struct disk_conf *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) return a->disk_barrier != b->disk_barrier ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) a->disk_flushes != b->disk_flushes ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) a->disk_drain != b->disk_drain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
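/* Clamp al_extents to its valid range, and align the resync discard
 * granularity to what the backing queue can actually provide. */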
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *disk_conf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) struct drbd_backing_dev *nbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) struct request_queue * const q = nbc->backing_bdev->bd_disk->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) if (disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) if (disk_conf->al_extents > drbd_al_extents_max(nbc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) disk_conf->al_extents = drbd_al_extents_max(nbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) if (!blk_queue_discard(q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) if (disk_conf->rs_discard_granularity) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) disk_conf->rs_discard_granularity = 0; /* disable feature */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) drbd_info(device, "rs_discard_granularity feature disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) if (disk_conf->rs_discard_granularity) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) int orig_value = disk_conf->rs_discard_granularity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) int remainder;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) if (q->limits.discard_granularity > disk_conf->rs_discard_granularity)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) disk_conf->rs_discard_granularity = q->limits.discard_granularity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
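		/* Nudge rs_discard_granularity up by the remainder; note that
		 * this is not a strict round-up to a multiple of the backend
		 * discard granularity. */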
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) remainder = disk_conf->rs_discard_granularity % q->limits.discard_granularity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) disk_conf->rs_discard_granularity += remainder;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) if (disk_conf->rs_discard_granularity > q->limits.max_discard_sectors << 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) disk_conf->rs_discard_granularity = q->limits.max_discard_sectors << 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) if (disk_conf->rs_discard_granularity != orig_value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) drbd_info(device, "rs_discard_granularity changed to %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) disk_conf->rs_discard_granularity);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
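/* Resize the activity log to dc->al_extents elements, if that differs from
 * the current size. This needs to drain application IO and lock the AL, so
 * it may bail out with -EBUSY instead of blocking "forever". */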
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) static int disk_opts_check_al_size(struct drbd_device *device, struct disk_conf *dc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) int err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) if (device->act_log &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) device->act_log->nr_elements == dc->al_extents)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) drbd_suspend_io(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) /* If IO completion is currently blocked, we would likely wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) * "forever" for the activity log to become unused. So we don't. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) if (atomic_read(&device->ap_bio_cnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) wait_event(device->al_wait, lc_try_lock(device->act_log));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) drbd_al_shrink(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) err = drbd_check_al_size(device, dc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) lc_unlock(device->act_log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) wake_up(&device->al_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) drbd_resume_io(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
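/* Netlink handler for changing the options of an attached disk: build a new
 * disk_conf from the request, sanitize it, and publish it RCU-style under
 * the resource's conf_update mutex. */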
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) struct drbd_config_context adm_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) enum drbd_ret_code retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) struct drbd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) struct disk_conf *new_disk_conf, *old_disk_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) unsigned int fifo_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) if (!adm_ctx.reply_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) return retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) device = adm_ctx.device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) mutex_lock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) /* we also need a disk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) * to change the options on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) if (!get_ldev(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) retcode = ERR_NO_DISK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) if (!new_disk_conf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) retcode = ERR_NOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) mutex_lock(&device->resource->conf_update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) old_disk_conf = device->ldev->disk_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) *new_disk_conf = *old_disk_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) if (should_set_defaults(info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) set_disk_conf_defaults(new_disk_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) err = disk_conf_from_attrs_for_change(new_disk_conf, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) if (err && err != -ENOMSG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) retcode = ERR_MANDATORY_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) goto fail_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) if (!expect(new_disk_conf->resync_rate >= 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) new_disk_conf->resync_rate = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) sanitize_disk_conf(device, new_disk_conf, device->ldev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
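	/* One fifo slot per SLEEP_TIME tick of the plan-ahead window;
	 * with SLEEP_TIME == HZ/10 that is one slot per 0.1 seconds. */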
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) if (fifo_size != device->rs_plan_s->size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) new_plan = fifo_alloc(fifo_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) if (!new_plan) {
			drbd_err(device, "kmalloc of fifo_buffer failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) retcode = ERR_NOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) goto fail_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) err = disk_opts_check_al_size(device, new_disk_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) /* Could be just "busy". Ignore?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) * Introduce dedicated error code? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) drbd_msg_put_info(adm_ctx.reply_skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) "Try again without changing current al-extents setting");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) retcode = ERR_NOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) goto fail_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
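	/* The resync-after dependencies may span resources, so they are
	 * validated and applied under the global resources lock. */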
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) lock_all_resources();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) if (retcode == NO_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) drbd_resync_after_changed(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) unlock_all_resources();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) goto fail_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) if (new_plan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) old_plan = device->rs_plan_s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) rcu_assign_pointer(device->rs_plan_s, new_plan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) mutex_unlock(&device->resource->conf_update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) if (new_disk_conf->al_updates)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) device->ldev->md.flags &= ~MDF_AL_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) device->ldev->md.flags |= MDF_AL_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) if (new_disk_conf->md_flushes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) clear_bit(MD_NO_FUA, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) set_bit(MD_NO_FUA, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) if (write_ordering_changed(old_disk_conf, new_disk_conf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
	if (old_disk_conf->discard_zeroes_if_aligned != new_disk_conf->discard_zeroes_if_aligned ||
	    old_disk_conf->disable_write_same != new_disk_conf->disable_write_same)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) drbd_reconsider_queue_parameters(device, device->ldev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) drbd_md_sync(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) if (device->state.conn >= C_CONNECTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) struct drbd_peer_device *peer_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) for_each_peer_device(peer_device, device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) drbd_send_sync_param(peer_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
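	/* RCU readers may still be using the old disk_conf and resync plan;
	 * wait for a grace period to pass before freeing them. */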
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) kfree(old_disk_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) kfree(old_plan);
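	/* re-arm the request timer, so possibly changed timeout settings
	 * are re-evaluated soon */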
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) mod_timer(&device->request_timer, jiffies + HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) goto success;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) fail_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) mutex_unlock(&device->resource->conf_update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) kfree(new_disk_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) kfree(new_plan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) success:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) put_ldev(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) mutex_unlock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) finish:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) drbd_adm_finish(&adm_ctx, info, retcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
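/* Open a backing block device exclusively, with claim_ptr as holder.
 * If do_bd_link is set, additionally register device->vdisk as a holder
 * of that bdev in sysfs. */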
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) static struct block_device *open_backing_dev(struct drbd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) const char *bdev_path, void *claim_ptr, bool do_bd_link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) struct block_device *bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) bdev = blkdev_get_by_path(bdev_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) FMODE_READ | FMODE_WRITE | FMODE_EXCL, claim_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) if (IS_ERR(bdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) drbd_err(device, "open(\"%s\") failed with %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) bdev_path, PTR_ERR(bdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) return bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) if (!do_bd_link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) return bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) err = bd_link_disk_holder(bdev, device->vdisk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) drbd_err(device, "bd_link_disk_holder(\"%s\", ...) failed with %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) bdev_path, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) bdev = ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) return bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) static int open_backing_devices(struct drbd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) struct disk_conf *new_disk_conf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) struct drbd_backing_dev *nbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) struct block_device *bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) bdev = open_backing_dev(device, new_disk_conf->backing_dev, device, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) if (IS_ERR(bdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) return ERR_OPEN_DISK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) nbc->backing_bdev = bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) * meta_dev_idx >= 0: external fixed size, possibly multiple
	 * drbd minors sharing one meta device. TODO: in that case, paranoia
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) * check that [md_bdev, meta_dev_idx] is not yet used by some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) * other drbd minor! (if you use drbd.conf + drbdadm, that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) * should check it for you already; but if you don't, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) * someone fooled it, we need to double check here)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) bdev = open_backing_dev(device, new_disk_conf->meta_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) /* claim ptr: device, if claimed exclusively; shared drbd_m_holder,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) * if potentially shared with other drbd minors */
		(new_disk_conf->meta_dev_idx < 0) ? (void *)device : (void *)drbd_m_holder,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) /* avoid double bd_claim_by_disk() for the same (source,target) tuple,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) * as would happen with internal metadata. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) (new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_FLEX_INT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_INTERNAL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) if (IS_ERR(bdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) return ERR_OPEN_MD_DISK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) nbc->md_bdev = bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) return NO_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) static void close_backing_dev(struct drbd_device *device, struct block_device *bdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) bool do_bd_unlink)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) if (!bdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) if (do_bd_unlink)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) bd_unlink_disk_holder(bdev, device->vdisk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)
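/* Release both backing devices. With internal meta data, md_bdev equals
 * backing_bdev and holds no separate holder link, hence the conditional
 * unlink for md_bdev. */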
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) if (ldev == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) close_backing_dev(device, ldev->md_bdev, ldev->md_bdev != ldev->backing_bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) close_backing_dev(device, ldev->backing_bdev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) kfree(ldev->disk_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) kfree(ldev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)
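/* Netlink handler for attaching a backing device: open and claim the lower
 * level devices, read the meta data super block, validate sizes and options,
 * then transition from D_DISKLESS via D_ATTACHING towards D_NEGOTIATING or
 * the final disk state. */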
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) struct drbd_config_context adm_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) struct drbd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) struct drbd_peer_device *peer_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) struct drbd_connection *connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) enum drbd_ret_code retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) enum determine_dev_size dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) sector_t max_possible_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) sector_t min_md_device_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) struct disk_conf *new_disk_conf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) struct lru_cache *resync_lru = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) struct fifo_buffer *new_plan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) union drbd_state ns, os;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) enum drbd_state_rv rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) struct net_conf *nc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) if (!adm_ctx.reply_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) return retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) device = adm_ctx.device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) mutex_lock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) peer_device = first_peer_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) connection = peer_device->connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) conn_reconfig_start(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) /* if you want to reconfigure, please tear down first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) if (device->state.disk > D_DISKLESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) retcode = ERR_DISK_CONFIGURED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) }
	/* It may have just now detached because of an IO error. Make sure
	 * drbd_ldev_destroy is done already; we may end up here very fast,
	 * e.g. if someone calls attach from the on-io-error handler,
	 * to realize a "hot spare" feature (not that I'd recommend that). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) wait_event(device->misc_wait, !test_bit(GOING_DISKLESS, &device->flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) /* make sure there is no leftover from previous force-detach attempts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) clear_bit(FORCE_DETACH, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) clear_bit(WAS_IO_ERROR, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) clear_bit(WAS_READ_ERROR, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) /* and no leftover from previously aborted resync or verify, either */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) device->rs_total = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) device->rs_failed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) atomic_set(&device->rs_pending_cnt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) /* allocation not in the IO path, drbdsetup context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) if (!nbc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) retcode = ERR_NOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) spin_lock_init(&nbc->md.uuid_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) if (!new_disk_conf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) retcode = ERR_NOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) nbc->disk_conf = new_disk_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) set_disk_conf_defaults(new_disk_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) err = disk_conf_from_attrs(new_disk_conf, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) retcode = ERR_MANDATORY_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) if (!new_plan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) retcode = ERR_NOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) retcode = ERR_MD_IDX_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) nc = rcu_dereference(connection->net_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) if (nc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) retcode = ERR_STONITH_AND_PROT_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) retcode = open_backing_devices(device, new_disk_conf, nbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) if ((nbc->backing_bdev == nbc->md_bdev) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) retcode = ERR_MD_IDX_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) resync_lru = lc_create("resync", drbd_bm_ext_cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 1, 61, sizeof(struct bm_extent),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) offsetof(struct bm_extent, lce));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) if (!resync_lru) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) retcode = ERR_NOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) /* Read our meta data super block early.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) * This also sets other on-disk offsets. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) retcode = drbd_md_read(device, nbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) sanitize_disk_conf(device, new_disk_conf, nbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) drbd_err(device, "max capacity %llu smaller than disk size %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) (unsigned long long) drbd_get_max_capacity(nbc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) (unsigned long long) new_disk_conf->disk_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) retcode = ERR_DISK_TOO_SMALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) if (new_disk_conf->meta_dev_idx < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
		/* at least one MB (2<<10 = 2048 sectors of 512 bytes = 1 MiB),
		 * otherwise it does not make sense */
		min_md_device_sectors = (2<<10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) max_possible_sectors = DRBD_MAX_SECTORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) min_md_device_sectors = MD_128MB_SECT * (new_disk_conf->meta_dev_idx + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) retcode = ERR_MD_DISK_TOO_SMALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) drbd_warn(device, "refusing attach: md-device too small, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) "at least %llu sectors needed for this meta-disk type\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) (unsigned long long) min_md_device_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) /* Make sure the new disk is big enough
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) * (we may currently be R_PRIMARY with no local disk...) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) if (drbd_get_max_capacity(nbc) < get_capacity(device->vdisk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) retcode = ERR_DISK_TOO_SMALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) if (nbc->known_size > max_possible_sectors) {
		drbd_warn(device, "==> truncating very big lower level device "
			"to the currently possible maximum of %llu sectors <==\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) (unsigned long long) max_possible_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) if (new_disk_conf->meta_dev_idx >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) drbd_warn(device, "==>> using internal or flexible "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) "meta data may help <<==\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) drbd_suspend_io(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) /* also wait for the last barrier ack. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) /* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) * We need a way to either ignore barrier acks for barriers sent before a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) * was attached, or a way to wait for all pending barrier acks to come in.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) * As barriers are counted per resource,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) * we'd need to suspend io on all devices of a resource.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) wait_event(device->misc_wait, !atomic_read(&device->ap_pending_cnt) || drbd_suspended(device));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) /* and for any other previously queued work */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) drbd_flush_workqueue(&connection->sender_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) rv = _drbd_request_state(device, NS(disk, D_ATTACHING), CS_VERBOSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) retcode = rv; /* FIXME: Type mismatch. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) drbd_resume_io(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) if (rv < SS_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) if (!get_ldev_if_state(device, D_ATTACHING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) goto force_diskless;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) if (!device->bitmap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) if (drbd_bm_init(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) retcode = ERR_NOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) goto force_diskless_dec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) if (device->state.pdsk != D_UP_TO_DATE && device->ed_uuid &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) (device->state.role == R_PRIMARY || device->state.peer == R_PRIMARY) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) drbd_err(device, "Can only attach to data with current UUID=%016llX\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) (unsigned long long)device->ed_uuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) retcode = ERR_DATA_NOT_CURRENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) goto force_diskless_dec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) /* Since we are diskless, fix the activity log first... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) if (drbd_check_al_size(device, new_disk_conf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) retcode = ERR_NOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) goto force_diskless_dec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)
	/* Prevent shrinking of consistent devices! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) unsigned long long nsz = drbd_new_dev_size(device, nbc, nbc->disk_conf->disk_size, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) unsigned long long eff = nbc->md.la_size_sect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) if (drbd_md_test_flag(nbc, MDF_CONSISTENT) && nsz < eff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) if (nsz == nbc->disk_conf->disk_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) drbd_warn(device, "truncating a consistent device during attach (%llu < %llu)\n", nsz, eff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) drbd_warn(device, "refusing to truncate a consistent device (%llu < %llu)\n", nsz, eff);
			drbd_msg_sprintf_info(adm_ctx.reply_skb,
				"To-be-attached device has a last effective size larger than its current size, and is consistent\n"
				"(%llu > %llu sectors). Refusing to attach.", eff, nsz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) retcode = ERR_IMPLICIT_SHRINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) goto force_diskless_dec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) lock_all_resources();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) if (retcode != NO_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) unlock_all_resources();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) goto force_diskless_dec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) /* Reset the "barriers don't work" bits here, then force meta data to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) * be written, to ensure we determine if barriers are supported. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) if (new_disk_conf->md_flushes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) clear_bit(MD_NO_FUA, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) set_bit(MD_NO_FUA, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)
	/* Point of no return reached.
	 * Devices and memory are no longer released by the error cleanup below.
	 * Now the device takes over responsibility, and the state engine should
	 * clean it up somewhere. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) D_ASSERT(device, device->ldev == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) device->ldev = nbc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) device->resync = resync_lru;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) device->rs_plan_s = new_plan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) nbc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) resync_lru = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) new_disk_conf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) new_plan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) drbd_resync_after_changed(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) drbd_bump_write_ordering(device->resource, device->ldev, WO_BDEV_FLUSH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) unlock_all_resources();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) if (drbd_md_test_flag(device->ldev, MDF_CRASHED_PRIMARY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) set_bit(CRASHED_PRIMARY, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) clear_bit(CRASHED_PRIMARY, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) if (drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) !(device->state.role == R_PRIMARY && device->resource->susp_nod))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) set_bit(CRASHED_PRIMARY, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) device->send_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) device->recv_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) device->read_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) device->writ_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) drbd_reconsider_queue_parameters(device, device->ldev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059)
	/* If I am currently not R_PRIMARY,
	 * but the meta data primary indicator is set,
	 * I am just now recovering from a hard crash,
	 * and have been R_PRIMARY before that crash.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) * Now, if I had no connection before that crash
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) * (have been degraded R_PRIMARY), chances are that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) * I won't find my peer now either.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) * In that case, and _only_ in that case,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) * we use the degr-wfc-timeout instead of the default,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) * so we can automatically recover from a crash of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) * degraded but active "cluster" after a certain timeout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) clear_bit(USE_DEGR_WFC_T, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) if (device->state.role != R_PRIMARY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) !drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) set_bit(USE_DEGR_WFC_T, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) dd = drbd_determine_dev_size(device, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) if (dd <= DS_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) retcode = ERR_NOMEM_BITMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) goto force_diskless_dec;
	} else if (dd == DS_GREW) {
		set_bit(RESYNC_AFTER_NEG, &device->flags);
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) (test_bit(CRASHED_PRIMARY, &device->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) drbd_md_test_flag(device->ldev, MDF_AL_DISABLED))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) drbd_info(device, "Assuming that all blocks are out of sync "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) "(aka FullSync)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) "set_n_write from attaching", BM_LOCKED_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) retcode = ERR_IO_MD_DISK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) goto force_diskless_dec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) if (drbd_bitmap_io(device, &drbd_bm_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) "read from attaching", BM_LOCKED_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) retcode = ERR_IO_MD_DISK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) goto force_diskless_dec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) if (_drbd_bm_total_weight(device) == drbd_bm_bits(device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) drbd_suspend_al(device); /* IO is still suspended here... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) spin_lock_irq(&device->resource->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) os = drbd_read_state(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) ns = os;
	/* If MDF_CONSISTENT is not set, go into an inconsistent state;
	   otherwise investigate MDF_WAS_UP_TO_DATE:
	   if MDF_WAS_UP_TO_DATE is not set, go into the D_OUTDATED disk state,
	   otherwise into the D_CONSISTENT state.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) if (drbd_md_test_flag(device->ldev, MDF_CONSISTENT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) if (drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) ns.disk = D_CONSISTENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) ns.disk = D_OUTDATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) ns.disk = D_INCONSISTENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) if (drbd_md_test_flag(device->ldev, MDF_PEER_OUT_DATED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) ns.pdsk = D_OUTDATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) if (ns.disk == D_CONSISTENT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) (ns.pdsk == D_OUTDATED || rcu_dereference(device->ldev->disk_conf)->fencing == FP_DONT_CARE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) ns.disk = D_UP_TO_DATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) this point, because drbd_request_state() modifies these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) flags. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) if (rcu_dereference(device->ldev->disk_conf)->al_updates)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) device->ldev->md.flags &= ~MDF_AL_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) device->ldev->md.flags |= MDF_AL_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144)
	/* In case we are C_CONNECTED, postpone any decision on the new disk
	   state until after the negotiation phase. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) if (device->state.conn == C_CONNECTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) device->new_state_tmp.i = ns.i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) ns.i = os.i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) ns.disk = D_NEGOTIATING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) /* We expect to receive up-to-date UUIDs soon.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) To avoid a race in receive_state, free p_uuid while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) holding req_lock. I.e. atomic with the state change */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) kfree(device->p_uuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) device->p_uuid = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) rv = _drbd_set_state(device, ns, CS_VERBOSE, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) spin_unlock_irq(&device->resource->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) if (rv < SS_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) goto force_diskless_dec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) mod_timer(&device->request_timer, jiffies + HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) if (device->state.role == R_PRIMARY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) device->ldev->md.uuid[UI_CURRENT] |= (u64)1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) drbd_md_mark_dirty(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) drbd_md_sync(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) put_ldev(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) conn_reconfig_done(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) mutex_unlock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) drbd_adm_finish(&adm_ctx, info, retcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) force_diskless_dec:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) put_ldev(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) force_diskless:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) drbd_force_state(device, NS(disk, D_DISKLESS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) drbd_md_sync(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) conn_reconfig_done(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) if (nbc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) close_backing_dev(device, nbc->md_bdev, nbc->md_bdev != nbc->backing_bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) close_backing_dev(device, nbc->backing_bdev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) kfree(nbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) kfree(new_disk_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) lc_destroy(resync_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) kfree(new_plan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) mutex_unlock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) finish:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) drbd_adm_finish(&adm_ctx, info, retcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)
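/* With force, skip any graceful cleanup and force the disk into D_FAILED;
 * otherwise request a regular detach and wait for it interruptibly. */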
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) static int adm_detach(struct drbd_device *device, int force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) if (force) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) set_bit(FORCE_DETACH, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) drbd_force_state(device, NS(disk, D_FAILED));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) return SS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) return drbd_request_detach_interruptible(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) /* Detaching the disk is a process in multiple stages. First we need to lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) * internal references as well.
 * Only then have we finally detached. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) struct drbd_config_context adm_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) enum drbd_ret_code retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) struct detach_parms parms = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) if (!adm_ctx.reply_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) return retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) err = detach_parms_from_attrs(&parms, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) retcode = ERR_MANDATORY_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) mutex_lock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) retcode = adm_detach(adm_ctx.device, parms.force_detach);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) mutex_unlock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) drbd_adm_finish(&adm_ctx, info, retcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) static bool conn_resync_running(struct drbd_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) struct drbd_peer_device *peer_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) bool rv = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) int vnr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) struct drbd_device *device = peer_device->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) if (device->state.conn == C_SYNC_SOURCE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) device->state.conn == C_SYNC_TARGET ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) device->state.conn == C_PAUSED_SYNC_S ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) device->state.conn == C_PAUSED_SYNC_T) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) rv = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) static bool conn_ov_running(struct drbd_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) struct drbd_peer_device *peer_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) bool rv = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) int vnr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) struct drbd_device *device = peer_device->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) if (device->state.conn == C_VERIFY_S ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) device->state.conn == C_VERIFY_T) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) rv = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) }
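
/*
 * Illustrative sketch: conn_resync_running() and conn_ov_running() above
 * differ only in which replication states they look for; both are
 * RCU-protected scans over the connection's peer-device IDR.  A shared
 * helper could look like this (conn_any_device_in_state() is hypothetical,
 * not part of the original source):
 */
static bool __maybe_unused
conn_any_device_in_state(struct drbd_connection *connection,
			 bool (*match)(enum drbd_conns cstate))
{
	struct drbd_peer_device *peer_device;
	bool rv = false;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		if (match(peer_device->device->state.conn)) {
			rv = true;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}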
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) static enum drbd_ret_code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) _check_net_options(struct drbd_connection *connection, struct net_conf *old_net_conf, struct net_conf *new_net_conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) struct drbd_peer_device *peer_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) if (old_net_conf && connection->cstate == C_WF_REPORT_PARAMS && connection->agreed_pro_version < 100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) if (new_net_conf->wire_protocol != old_net_conf->wire_protocol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) return ERR_NEED_APV_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) if (new_net_conf->two_primaries != old_net_conf->two_primaries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) return ERR_NEED_APV_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) if (strcmp(new_net_conf->integrity_alg, old_net_conf->integrity_alg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) return ERR_NEED_APV_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) if (!new_net_conf->two_primaries &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) conn_highest_role(connection) == R_PRIMARY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) conn_highest_peer(connection) == R_PRIMARY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) return ERR_NEED_ALLOW_TWO_PRI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) if (new_net_conf->two_primaries &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) (new_net_conf->wire_protocol != DRBD_PROT_C))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) return ERR_NOT_PROTO_C;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) idr_for_each_entry(&connection->peer_devices, peer_device, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) struct drbd_device *device = peer_device->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) if (get_ldev(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) enum drbd_fencing_p fp = rcu_dereference(device->ldev->disk_conf)->fencing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) put_ldev(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) if (new_net_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) return ERR_STONITH_AND_PROT_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) if (device->state.role == R_PRIMARY && new_net_conf->discard_my_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) return ERR_DISCARD_IMPOSSIBLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) if (new_net_conf->on_congestion != OC_BLOCK && new_net_conf->wire_protocol != DRBD_PROT_A)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) return ERR_CONG_NOT_PROTO_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) return NO_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) static enum drbd_ret_code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) check_net_options(struct drbd_connection *connection, struct net_conf *new_net_conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) enum drbd_ret_code rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) struct drbd_peer_device *peer_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) rv = _check_net_options(connection, rcu_dereference(connection->net_conf), new_net_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) /* connection->peer_devices protected by genl_lock() here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) idr_for_each_entry(&connection->peer_devices, peer_device, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) struct drbd_device *device = peer_device->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) if (!device->bitmap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) if (drbd_bm_init(device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) return ERR_NOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) struct crypto {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) struct crypto_shash *verify_tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) struct crypto_shash *csums_tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) struct crypto_shash *cram_hmac_tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) struct crypto_shash *integrity_tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) alloc_shash(struct crypto_shash **tfm, char *tfm_name, int err_alg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) if (!tfm_name[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) return NO_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) *tfm = crypto_alloc_shash(tfm_name, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) if (IS_ERR(*tfm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) *tfm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) return err_alg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) return NO_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) static enum drbd_ret_code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) char hmac_name[CRYPTO_MAX_ALG_NAME];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) enum drbd_ret_code rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) rv = alloc_shash(&crypto->csums_tfm, new_net_conf->csums_alg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) ERR_CSUMS_ALG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) if (rv != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) rv = alloc_shash(&crypto->verify_tfm, new_net_conf->verify_alg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) ERR_VERIFY_ALG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) if (rv != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) rv = alloc_shash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) ERR_INTEGRITY_ALG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) if (rv != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) if (new_net_conf->cram_hmac_alg[0] != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) new_net_conf->cram_hmac_alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) rv = alloc_shash(&crypto->cram_hmac_tfm, hmac_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) ERR_AUTH_ALG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) static void free_crypto(struct crypto *crypto)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) crypto_free_shash(crypto->cram_hmac_tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) crypto_free_shash(crypto->integrity_tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) crypto_free_shash(crypto->csums_tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) crypto_free_shash(crypto->verify_tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) }
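
/*
 * Illustrative note: the transforms managed above are synchronous hashes
 * ("shash").  For reference, digesting a buffer with such a tfm uses the
 * standard kernel crypto API roughly as below (a sketch, assuming
 * <crypto/hash.h> is reachable from the headers included here; this helper
 * is not part of the original source):
 */
static int __maybe_unused
example_shash_digest(struct crypto_shash *tfm,
		     const u8 *data, unsigned int len, u8 *out)
{
	/* out must provide crypto_shash_digestsize(tfm) bytes */
	SHASH_DESC_ON_STACK(desc, tfm);	/* request descriptor on the stack */

	desc->tfm = tfm;
	return crypto_shash_digest(desc, data, len, out);	/* init+update+final */
}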
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) struct drbd_config_context adm_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) enum drbd_ret_code retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) struct drbd_connection *connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) struct net_conf *old_net_conf, *new_net_conf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) int ovr; /* online verify running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) int rsr; /* re-sync running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) struct crypto crypto = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) if (!adm_ctx.reply_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) return retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) connection = adm_ctx.connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) mutex_lock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) if (!new_net_conf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) retcode = ERR_NOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) conn_reconfig_start(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) mutex_lock(&connection->data.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) mutex_lock(&connection->resource->conf_update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) old_net_conf = connection->net_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) if (!old_net_conf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) drbd_msg_put_info(adm_ctx.reply_skb, "net conf missing, try connect");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) retcode = ERR_INVALID_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) *new_net_conf = *old_net_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) if (should_set_defaults(info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) set_net_conf_defaults(new_net_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) err = net_conf_from_attrs_for_change(new_net_conf, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) if (err && err != -ENOMSG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) retcode = ERR_MANDATORY_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) retcode = check_net_options(connection, new_net_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) /* re-sync running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) rsr = conn_resync_running(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) if (rsr && strcmp(new_net_conf->csums_alg, old_net_conf->csums_alg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) retcode = ERR_CSUMS_RESYNC_RUNNING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) /* online verify running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) ovr = conn_ov_running(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) if (ovr && strcmp(new_net_conf->verify_alg, old_net_conf->verify_alg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) retcode = ERR_VERIFY_RUNNING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) retcode = alloc_crypto(&crypto, new_net_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) rcu_assign_pointer(connection->net_conf, new_net_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) if (!rsr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) crypto_free_shash(connection->csums_tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) connection->csums_tfm = crypto.csums_tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) crypto.csums_tfm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) if (!ovr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) crypto_free_shash(connection->verify_tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) connection->verify_tfm = crypto.verify_tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) crypto.verify_tfm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) crypto_free_shash(connection->integrity_tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) connection->integrity_tfm = crypto.integrity_tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) /* Do this without trying to take connection->data.mutex again. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) __drbd_send_protocol(connection, P_PROTOCOL_UPDATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) crypto_free_shash(connection->cram_hmac_tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) mutex_unlock(&connection->resource->conf_update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) mutex_unlock(&connection->data.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) kfree(old_net_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) if (connection->cstate >= C_WF_REPORT_PARAMS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) struct drbd_peer_device *peer_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) int vnr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) drbd_send_sync_param(peer_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) mutex_unlock(&connection->resource->conf_update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) mutex_unlock(&connection->data.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) free_crypto(&crypto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) kfree(new_net_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) conn_reconfig_done(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) mutex_unlock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) finish:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) drbd_adm_finish(&adm_ctx, info, retcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) }
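
/*
 * Illustrative note: drbd_adm_net_opts() above is an instance of the
 * classic RCU replace-and-reclaim sequence for connection->net_conf.
 * Reduced to its skeleton (illustrative pseudocode, not compiled):
 */
#if 0
	new = kzalloc(sizeof(*new), GFP_KERNEL);
	*new = *old;				/* copy the current conf ... */
	apply_changes(new);			/* ... and modify the copy */
	rcu_assign_pointer(shared_ptr, new);	/* publish: new readers see "new" */
	synchronize_rcu();			/* wait out readers still on "old" */
	kfree(old);				/* now safe to free */
#endif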
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) static void connection_to_info(struct connection_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) struct drbd_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) info->conn_connection_state = connection->cstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) info->conn_role = conn_highest_peer(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) static void peer_device_to_info(struct peer_device_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) struct drbd_peer_device *peer_device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) struct drbd_device *device = peer_device->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) info->peer_repl_state =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) max_t(enum drbd_conns, C_WF_REPORT_PARAMS, device->state.conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) info->peer_disk_state = device->state.pdsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) info->peer_resync_susp_user = device->state.user_isp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) info->peer_resync_susp_peer = device->state.peer_isp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) info->peer_resync_susp_dependency = device->state.aftr_isp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) struct connection_info connection_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) enum drbd_notification_type flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) unsigned int peer_devices = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) struct drbd_config_context adm_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) struct drbd_peer_device *peer_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) struct net_conf *old_net_conf, *new_net_conf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) struct crypto crypto = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) struct drbd_resource *resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) struct drbd_connection *connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) enum drbd_ret_code retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) if (!adm_ctx.reply_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) return retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) drbd_msg_put_info(adm_ctx.reply_skb, "connection endpoint(s) missing");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) retcode = ERR_INVALID_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) /* No need for _rcu here. All reconfiguration is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) * strictly serialized on genl_lock(). We are protected against
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) * concurrent reconfiguration/addition/deletion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) for_each_resource(resource, &drbd_resources) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) for_each_connection(connection, resource) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) if (nla_len(adm_ctx.my_addr) == connection->my_addr_len &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) !memcmp(nla_data(adm_ctx.my_addr), &connection->my_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) connection->my_addr_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) retcode = ERR_LOCAL_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) if (nla_len(adm_ctx.peer_addr) == connection->peer_addr_len &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) !memcmp(nla_data(adm_ctx.peer_addr), &connection->peer_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) connection->peer_addr_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) retcode = ERR_PEER_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) mutex_lock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) connection = first_connection(adm_ctx.resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) conn_reconfig_start(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) if (connection->cstate > C_STANDALONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) retcode = ERR_NET_CONFIGURED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) /* allocation not in the IO path, drbdsetup / netlink process context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) new_net_conf = kzalloc(sizeof(*new_net_conf), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) if (!new_net_conf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) retcode = ERR_NOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) set_net_conf_defaults(new_net_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) err = net_conf_from_attrs(new_net_conf, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) if (err && err != -ENOMSG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) retcode = ERR_MANDATORY_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) retcode = check_net_options(connection, new_net_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) retcode = alloc_crypto(&crypto, new_net_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) ((char *)new_net_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) drbd_flush_workqueue(&connection->sender_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) mutex_lock(&adm_ctx.resource->conf_update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) old_net_conf = connection->net_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) if (old_net_conf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) retcode = ERR_NET_CONFIGURED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) mutex_unlock(&adm_ctx.resource->conf_update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) rcu_assign_pointer(connection->net_conf, new_net_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) conn_free_crypto(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) connection->integrity_tfm = crypto.integrity_tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) connection->csums_tfm = crypto.csums_tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) connection->verify_tfm = crypto.verify_tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) connection->my_addr_len = nla_len(adm_ctx.my_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) memcpy(&connection->my_addr, nla_data(adm_ctx.my_addr), connection->my_addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) connection->peer_addr_len = nla_len(adm_ctx.peer_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) memcpy(&connection->peer_addr, nla_data(adm_ctx.peer_addr), connection->peer_addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) idr_for_each_entry(&connection->peer_devices, peer_device, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) peer_devices++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) connection_to_info(&connection_info, connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) mutex_lock(¬ification_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) notify_connection_state(NULL, 0, connection, &connection_info, NOTIFY_CREATE | flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) idr_for_each_entry(&connection->peer_devices, peer_device, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) struct peer_device_info peer_device_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) peer_device_to_info(&peer_device_info, peer_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) notify_peer_device_state(NULL, 0, peer_device, &peer_device_info, NOTIFY_CREATE | flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) mutex_unlock(¬ification_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) mutex_unlock(&adm_ctx.resource->conf_update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) idr_for_each_entry(&connection->peer_devices, peer_device, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) struct drbd_device *device = peer_device->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) device->send_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) device->recv_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) retcode = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) conn_reconfig_done(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) mutex_unlock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) drbd_adm_finish(&adm_ctx, info, retcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) free_crypto(&crypto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) kfree(new_net_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) conn_reconfig_done(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) mutex_unlock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) drbd_adm_finish(&adm_ctx, info, retcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) }
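
/*
 * Illustrative note on the "flags = (peer_devices--) ? NOTIFY_CONTINUES : 0"
 * idiom used above: starting from the number of events that still follow,
 * every notification except the last one is flagged as "continues".
 * Demonstrated in isolation (hypothetical example, not part of the driver):
 */
static void __maybe_unused example_continues_countdown(void)
{
	unsigned int remaining = 3;	/* events following the first one */
	unsigned int flags;
	int i;

	flags = (remaining--) ? 1 : 0;	/* 1 stands in for NOTIFY_CONTINUES */
	pr_info("event 0: flags=%u\n", flags);		/* -> 1 */

	for (i = 1; i <= 3; i++) {
		flags = (remaining--) ? 1 : 0;
		pr_info("event %d: flags=%u\n", i, flags);	/* 1, 1, then 0 on the last */
	}
}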
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) static enum drbd_state_rv conn_try_disconnect(struct drbd_connection *connection, bool force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) enum drbd_conns cstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) enum drbd_state_rv rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) repeat:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) force ? CS_HARD : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) switch (rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) case SS_NOTHING_TO_DO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) case SS_ALREADY_STANDALONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) return SS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) case SS_PRIMARY_NOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) /* Our state checking code wants to see the peer outdated. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) rv = conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_VERBOSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) case SS_CW_FAILED_BY_PEER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) spin_lock_irq(&connection->resource->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) cstate = connection->cstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) spin_unlock_irq(&connection->resource->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) if (cstate <= C_WF_CONNECTION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) goto repeat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) /* The peer probably wants to see us outdated. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) disk, D_OUTDATED), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) CS_HARD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 		break;	/* no special handling necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) if (rv >= SS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) enum drbd_state_rv rv2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) /* No one else can reconfigure the network while I am here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 		 * The state handling only uses drbd_thread_stop_nowait();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 		 * here we really want to wait until the receiver thread is gone.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) drbd_thread_stop(&connection->receiver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) /* Race breaker. This additional state change request may be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) * necessary, if this was a forced disconnect during a receiver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) * restart. We may have "killed" the receiver thread just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 		 * after drbd_receiver() returned.  Typically we are already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 		 * C_STANDALONE by now, and this becomes a no-op.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) rv2 = conn_request_state(connection, NS(conn, C_STANDALONE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) CS_VERBOSE | CS_HARD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) if (rv2 < SS_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) drbd_err(connection,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) "unexpected rv2=%d in conn_try_disconnect()\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) rv2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) /* Unlike in DRBD 9, the state engine has generated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) * NOTIFY_DESTROY events before clearing connection->net_conf. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) struct drbd_config_context adm_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) struct disconnect_parms parms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) struct drbd_connection *connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) enum drbd_state_rv rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) enum drbd_ret_code retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) if (!adm_ctx.reply_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) return retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) connection = adm_ctx.connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) memset(&parms, 0, sizeof(parms));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) err = disconnect_parms_from_attrs(&parms, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) retcode = ERR_MANDATORY_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) mutex_lock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) rv = conn_try_disconnect(connection, parms.force_disconnect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) if (rv < SS_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) retcode = rv; /* FIXME: Type mismatch. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) retcode = NO_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) mutex_unlock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) drbd_adm_finish(&adm_ctx, info, retcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) }
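
/*
 * Illustrative sketch regarding the FIXME above: an enum drbd_state_rv is
 * assigned to the enum drbd_ret_code return slot; both travel as the same
 * integer status field, which is presumably why this works in practice.
 * Making the conversion explicit could look like this (hypothetical helper,
 * not part of the original source):
 */
static __maybe_unused enum drbd_ret_code
state_rv_to_ret_code(enum drbd_state_rv rv)
{
	return rv < SS_SUCCESS ? (enum drbd_ret_code)rv : NO_ERROR;
}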
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) void resync_after_online_grow(struct drbd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) int iass; /* I am sync source */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) drbd_info(device, "Resync of new storage after online grow\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) if (device->state.role != device->state.peer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) iass = (device->state.role == R_PRIMARY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) iass = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) if (iass)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) drbd_start_resync(device, C_SYNC_SOURCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) }
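
/*
 * Illustrative sketch: the iass logic above reads as a pure predicate --
 * when the roles differ, the Primary becomes the sync source; otherwise
 * the RESOLVE_CONFLICTS tie-breaker decides.  A hypothetical restatement
 * (i_am_sync_source() is not in the original source):
 */
static bool __maybe_unused i_am_sync_source(struct drbd_device *device)
{
	if (device->state.role != device->state.peer)
		return device->state.role == R_PRIMARY;

	/* roles are equal: use the tie-breaker bit negotiated per connection */
	return test_bit(RESOLVE_CONFLICTS,
			&first_peer_device(device)->connection->flags);
}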
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) struct drbd_config_context adm_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) struct resize_parms rs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) struct drbd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) enum drbd_ret_code retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) enum determine_dev_size dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) bool change_al_layout = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) enum dds_flags ddsf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) sector_t u_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) if (!adm_ctx.reply_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) return retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) mutex_lock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) device = adm_ctx.device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) if (!get_ldev(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) retcode = ERR_NO_DISK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) memset(&rs, 0, sizeof(struct resize_parms));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) rs.al_stripes = device->ldev->md.al_stripes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) rs.al_stripe_size = device->ldev->md.al_stripe_size_4k * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) err = resize_parms_from_attrs(&rs, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) retcode = ERR_MANDATORY_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) goto fail_ldev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) if (device->state.conn > C_CONNECTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) retcode = ERR_RESIZE_RESYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) goto fail_ldev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) if (device->state.role == R_SECONDARY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) device->state.peer == R_SECONDARY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) retcode = ERR_NO_PRIMARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) goto fail_ldev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) if (rs.no_resync && first_peer_device(device)->connection->agreed_pro_version < 93) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) retcode = ERR_NEED_APV_93;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) goto fail_ldev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) if (u_size != (sector_t)rs.resize_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) if (!new_disk_conf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) retcode = ERR_NOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) goto fail_ldev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) if (device->ldev->md.al_stripes != rs.al_stripes ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) device->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 		u32 al_size_k = rs.al_stripes * rs.al_stripe_size;	/* total AL size in KiB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 		if (al_size_k > (16 * 1024 * 1024)) {	/* 16 GiB upper bound */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) retcode = ERR_MD_LAYOUT_TOO_BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) goto fail_ldev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 		if (al_size_k < MD_32kB_SECT/2) {	/* below the 32 KiB minimum */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) retcode = ERR_MD_LAYOUT_TOO_SMALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) goto fail_ldev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) if (device->state.conn != C_CONNECTED && !rs.resize_force) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) retcode = ERR_MD_LAYOUT_CONNECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) goto fail_ldev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) change_al_layout = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) if (new_disk_conf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) mutex_lock(&device->resource->conf_update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) old_disk_conf = device->ldev->disk_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) *new_disk_conf = *old_disk_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) new_disk_conf->disk_size = (sector_t)rs.resize_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) mutex_unlock(&device->resource->conf_update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) kfree(old_disk_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) new_disk_conf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) dd = drbd_determine_dev_size(device, ddsf, change_al_layout ? &rs : NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) drbd_md_sync(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) put_ldev(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) if (dd == DS_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) retcode = ERR_NOMEM_BITMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) } else if (dd == DS_ERROR_SPACE_MD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) retcode = ERR_MD_LAYOUT_NO_FIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) } else if (dd == DS_ERROR_SHRINK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) retcode = ERR_IMPLICIT_SHRINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) if (device->state.conn == C_CONNECTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) if (dd == DS_GREW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) set_bit(RESIZE_PENDING, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) drbd_send_uuids(first_peer_device(device));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) drbd_send_sizes(first_peer_device(device), 1, ddsf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) mutex_unlock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) finish:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) drbd_adm_finish(&adm_ctx, info, retcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) fail_ldev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) put_ldev(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) kfree(new_disk_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) }
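
/*
 * Illustrative note: the activity-log sizing rule checked in
 * drbd_adm_resize() above multiplies al_stripes by al_stripe_size (KiB)
 * and accepts the result only within [32 KiB, 16 GiB].  The same window as
 * a standalone check (hypothetical helper, not part of the original
 * source):
 */
static bool __maybe_unused example_al_size_ok(u32 stripes, u32 stripe_size_k)
{
	u32 al_size_k = stripes * stripe_size_k;	/* total AL size in KiB */

	return al_size_k >= MD_32kB_SECT/2 &&	/* not below 32 KiB */
	       al_size_k <= 16 * 1024 * 1024;	/* not above 16 GiB */
}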
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) struct drbd_config_context adm_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) enum drbd_ret_code retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) struct res_opts res_opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) if (!adm_ctx.reply_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) return retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) res_opts = adm_ctx.resource->res_opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) if (should_set_defaults(info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) set_res_opts_defaults(&res_opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) err = res_opts_from_attrs(&res_opts, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) if (err && err != -ENOMSG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) retcode = ERR_MANDATORY_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) mutex_lock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) err = set_resource_options(adm_ctx.resource, &res_opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) retcode = ERR_INVALID_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) if (err == -ENOMEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) retcode = ERR_NOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) mutex_unlock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) drbd_adm_finish(&adm_ctx, info, retcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) struct drbd_config_context adm_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) struct drbd_device *device;
	int retcode; /* enum drbd_ret_code or enum drbd_state_rv */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) if (!adm_ctx.reply_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) return retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) device = adm_ctx.device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) if (!get_ldev(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) retcode = ERR_NO_DISK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) mutex_lock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022)
	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just having finished, wait for it before requesting a new resync.
	 * Also wait for its after_state_ch(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) drbd_suspend_io(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) /* If we happen to be C_STANDALONE R_SECONDARY, just change to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) * D_INCONSISTENT, and set all bits in the bitmap. Otherwise,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) * try to start a resync handshake as sync target for full sync.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) if (device->state.conn == C_STANDALONE && device->state.role == R_SECONDARY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) retcode = drbd_request_state(device, NS(disk, D_INCONSISTENT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) if (retcode >= SS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) "set_n_write from invalidate", BM_LOCKED_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) retcode = ERR_IO_MD_DISK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) drbd_resume_io(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) mutex_unlock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) put_ldev(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) drbd_adm_finish(&adm_ctx, info, retcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) union drbd_state mask, union drbd_state val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) struct drbd_config_context adm_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) enum drbd_ret_code retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) if (!adm_ctx.reply_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) return retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) mutex_lock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) retcode = drbd_request_state(adm_ctx.device, mask, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) mutex_unlock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) drbd_adm_finish(&adm_ctx, info, retcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070)
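/* Bitmap IO helper: set all bits in the bitmap (mark everything as
 * out of sync) and suspend the activity log while at it. Called with
 * a local disk reference held. */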
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) static int drbd_bmio_set_susp_al(struct drbd_device *device) __must_hold(local)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) rv = drbd_bmio_set_n_write(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) drbd_suspend_al(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079)
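/* Netlink handler: invalidate the peer's copy of the data, forcing a full
 * resync with the local node as sync source. */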
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) struct drbd_config_context adm_ctx;
	int retcode; /* enum drbd_ret_code or enum drbd_state_rv */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) struct drbd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) if (!adm_ctx.reply_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) return retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) device = adm_ctx.device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) if (!get_ldev(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) retcode = ERR_NO_DISK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) mutex_lock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099)
	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just having finished, wait for it before requesting a new resync.
	 * Also wait for its after_state_ch(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) drbd_suspend_io(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) /* If we happen to be C_STANDALONE R_PRIMARY, just set all bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) * in the bitmap. Otherwise, try to start a resync handshake
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) * as sync source for full sync.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) if (device->state.conn == C_STANDALONE && device->state.role == R_PRIMARY) {
		/* The peer will get a resync upon connect anyway. Just make that
		   into a full resync. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) retcode = drbd_request_state(device, NS(pdsk, D_INCONSISTENT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) if (retcode >= SS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) if (drbd_bitmap_io(device, &drbd_bmio_set_susp_al,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) "set_n_write from invalidate_peer",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) BM_LOCKED_SET_ALLOWED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) retcode = ERR_IO_MD_DISK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) drbd_resume_io(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) mutex_unlock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) put_ldev(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) drbd_adm_finish(&adm_ctx, info, retcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) struct drbd_config_context adm_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) enum drbd_ret_code retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) if (!adm_ctx.reply_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) return retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) mutex_lock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) if (drbd_request_state(adm_ctx.device, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) retcode = ERR_PAUSE_IS_SET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) mutex_unlock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) drbd_adm_finish(&adm_ctx, info, retcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) struct drbd_config_context adm_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) union drbd_dev_state s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) enum drbd_ret_code retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) if (!adm_ctx.reply_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) return retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) mutex_lock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) if (drbd_request_state(adm_ctx.device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) s = adm_ctx.device->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) retcode = ERR_PAUSE_IS_CLEAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) mutex_unlock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) drbd_adm_finish(&adm_ctx, info, retcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) struct drbd_config_context adm_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) struct drbd_device *device;
	int retcode; /* enum drbd_ret_code or enum drbd_state_rv */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) if (!adm_ctx.reply_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) return retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) mutex_lock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) device = adm_ctx.device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) if (test_bit(NEW_CUR_UUID, &device->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) if (get_ldev_if_state(device, D_ATTACHING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) drbd_uuid_new_current(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) put_ldev(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) } else {
			/* This is effectively a multi-stage "forced down".
			 * The NEW_CUR_UUID bit is supposedly only set if we
			 * lost the replication connection and are configured
			 * to freeze IO and wait for some fence-peer handler.
			 * So we still don't have a replication connection.
			 * And now we don't have a local disk either. After
			 * resume, we will fail all pending and new IO, because
			 * we don't have any data anymore. Which means we will
			 * eventually be able to terminate all users of this
			 * device, and then take it down. By bumping the
			 * "effective" data uuid, we make sure that you really
			 * need to tear down before you reconfigure: we will
			 * then refuse to re-connect or re-attach (because no
			 * matching real data uuid exists).
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) get_random_bytes(&val, sizeof(u64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) drbd_set_ed_uuid(device, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) drbd_warn(device, "Resumed without access to data; please tear down before attempting to re-configure.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) clear_bit(NEW_CUR_UUID, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) drbd_suspend_io(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) retcode = drbd_request_state(device, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) if (retcode == SS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) if (device->state.conn < C_CONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) tl_clear(first_peer_device(device)->connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) if (device->state.disk == D_DISKLESS || device->state.disk == D_FAILED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) tl_restart(first_peer_device(device)->connection, FAIL_FROZEN_DISK_IO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) drbd_resume_io(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) mutex_unlock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) drbd_adm_finish(&adm_ctx, info, retcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244)
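/* Nest a DRBD_NLA_CFG_CONTEXT attribute describing the resource name,
 * the volume number and, for a connection, its local and peer addresses
 * (when set). Returns 0, or -EMSGSIZE if the skb ran out of room. */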
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) static int nla_put_drbd_cfg_context(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) struct drbd_resource *resource,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) struct drbd_connection *connection,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) struct drbd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) struct nlattr *nla;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_CONTEXT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) if (!nla)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) if (device &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) nla_put_u32(skb, T_ctx_volume, device->vnr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) if (nla_put_string(skb, T_ctx_resource_name, resource->name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) if (connection) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) if (connection->my_addr_len &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) nla_put(skb, T_ctx_my_addr, connection->my_addr_len, &connection->my_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) if (connection->peer_addr_len &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) nla_put(skb, T_ctx_peer_addr, connection->peer_addr_len, &connection->peer_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) nla_nest_end(skb, nla);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) if (nla)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) nla_nest_cancel(skb, nla);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) * The generic netlink dump callbacks are called outside the genl_lock(), so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) * they cannot use the simple attribute parsing code which uses global
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) * attribute tables.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) static struct nlattr *find_cfg_context_attr(const struct nlmsghdr *nlh, int attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) const int maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) struct nlattr *nla;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) nla = nla_find(nlmsg_attrdata(nlh, hdrlen), nlmsg_attrlen(nlh, hdrlen),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) DRBD_NLA_CFG_CONTEXT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) if (!nla)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) return drbd_nla_find_nested(maxtype, nla, __nla_type(attr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) static void resource_to_info(struct resource_info *, struct drbd_resource *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) struct drbd_genlmsghdr *dh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) struct drbd_resource *resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) struct resource_info resource_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) struct resource_statistics resource_statistics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) if (cb->args[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) for_each_resource_rcu(resource, &drbd_resources)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) if (resource == (struct drbd_resource *)cb->args[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) goto found_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) err = 0; /* resource was probably deleted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) }
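	/* Make resource point to the list head (not the first entry). */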
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) resource = list_entry(&drbd_resources,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) struct drbd_resource, resources);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) found_resource:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) list_for_each_entry_continue_rcu(resource, &drbd_resources, resources) {
		goto put_result; /* only one iteration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) put_result:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) cb->nlh->nlmsg_seq, &drbd_genl_family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) NLM_F_MULTI, DRBD_ADM_GET_RESOURCES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) if (!dh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) dh->minor = -1U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) dh->ret_code = NO_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) err = nla_put_drbd_cfg_context(skb, resource, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) err = res_opts_to_skb(skb, &resource->res_opts, !capable(CAP_SYS_ADMIN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) resource_to_info(&resource_info, resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) err = resource_info_to_skb(skb, &resource_info, !capable(CAP_SYS_ADMIN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) resource_statistics.res_stat_write_ordering = resource->write_ordering;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) cb->args[0] = (long)resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) genlmsg_end(skb, dh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) return skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) static void device_to_statistics(struct device_statistics *s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) struct drbd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) memset(s, 0, sizeof(*s));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) s->dev_upper_blocked = !may_inc_ap_bio(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) if (get_ldev(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) struct drbd_md *md = &device->ldev->md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) u64 *history_uuids = (u64 *)s->history_uuids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) spin_lock_irq(&md->uuid_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) s->dev_current_uuid = md->uuid[UI_CURRENT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) BUILD_BUG_ON(sizeof(s->history_uuids) < UI_HISTORY_END - UI_HISTORY_START + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) for (n = 0; n < UI_HISTORY_END - UI_HISTORY_START + 1; n++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) history_uuids[n] = md->uuid[UI_HISTORY_START + n];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) for (; n < HISTORY_UUIDS; n++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) history_uuids[n] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) s->history_uuids_len = HISTORY_UUIDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) spin_unlock_irq(&md->uuid_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) s->dev_disk_flags = md->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) put_ldev(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) s->dev_size = get_capacity(device->vdisk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) s->dev_read = device->read_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) s->dev_write = device->writ_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) s->dev_al_writes = device->al_writ_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) s->dev_bm_writes = device->bm_writ_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) s->dev_upper_pending = atomic_read(&device->ap_bio_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) s->dev_lower_pending = atomic_read(&device->local_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) s->dev_al_suspended = test_bit(AL_SUSPENDED, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) s->dev_exposed_data_uuid = device->ed_uuid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389)
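/* Dump "done" callback helper: drop the resource reference that the dump
 * callback parked in cb->args[0]. The holder_nr argument is currently
 * unused. */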
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) static int put_resource_in_arg0(struct netlink_callback *cb, int holder_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) if (cb->args[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) struct drbd_resource *resource =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) (struct drbd_resource *)cb->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) kref_put(&resource->kref, drbd_destroy_resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400)
int drbd_adm_dump_devices_done(struct netlink_callback *cb)
{
	return put_resource_in_arg0(cb, 7);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) static void device_to_info(struct device_info *, struct drbd_device *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) struct nlattr *resource_filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) struct drbd_resource *resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) struct drbd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) int minor, err, retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) struct drbd_genlmsghdr *dh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) struct device_info device_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) struct device_statistics device_statistics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) struct idr *idr_to_search;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) resource = (struct drbd_resource *)cb->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) if (!cb->args[0] && !cb->args[1]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) if (resource_filter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) retcode = ERR_RES_NOT_KNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) resource = drbd_find_resource(nla_data(resource_filter));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) if (!resource)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) goto put_result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) cb->args[0] = (long)resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) minor = cb->args[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) idr_to_search = resource ? &resource->devices : &drbd_devices;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) device = idr_get_next(idr_to_search, &minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) if (!device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) idr_for_each_entry_continue(idr_to_search, device, minor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) retcode = NO_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) goto put_result; /* only one iteration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) goto out; /* no more devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) put_result:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) cb->nlh->nlmsg_seq, &drbd_genl_family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) NLM_F_MULTI, DRBD_ADM_GET_DEVICES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) if (!dh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) dh->ret_code = retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) dh->minor = -1U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) if (retcode == NO_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) dh->minor = device->minor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) err = nla_put_drbd_cfg_context(skb, device->resource, NULL, device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) if (get_ldev(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) struct disk_conf *disk_conf =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) rcu_dereference(device->ldev->disk_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) err = disk_conf_to_skb(skb, disk_conf, !capable(CAP_SYS_ADMIN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) put_ldev(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) device_to_info(&device_info, device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) err = device_info_to_skb(skb, &device_info, !capable(CAP_SYS_ADMIN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) device_to_statistics(&device_statistics, device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) err = device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) cb->args[1] = minor + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) genlmsg_end(skb, dh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) return skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) int drbd_adm_dump_connections_done(struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) return put_resource_in_arg0(cb, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493)
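/* cb->args[1] of a connections dump records whether we dump a single,
 * explicitly requested resource, or iterate over all of them. */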
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) enum { SINGLE_RESOURCE, ITERATE_RESOURCES };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) struct nlattr *resource_filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) struct drbd_resource *resource = NULL, *next_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) struct drbd_connection *connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) int err = 0, retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) struct drbd_genlmsghdr *dh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) struct connection_info connection_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) struct connection_statistics connection_statistics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) resource = (struct drbd_resource *)cb->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) if (!cb->args[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) if (resource_filter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) retcode = ERR_RES_NOT_KNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) resource = drbd_find_resource(nla_data(resource_filter));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) if (!resource)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) goto put_result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) cb->args[0] = (long)resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) cb->args[1] = SINGLE_RESOURCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) if (!resource) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) if (list_empty(&drbd_resources))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) resource = list_first_entry(&drbd_resources, struct drbd_resource, resources);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) kref_get(&resource->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) cb->args[0] = (long)resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) cb->args[1] = ITERATE_RESOURCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) next_resource:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) mutex_lock(&resource->conf_update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) if (cb->args[2]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) for_each_connection_rcu(connection, resource)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) if (connection == (struct drbd_connection *)cb->args[2])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) goto found_connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) /* connection was probably deleted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) goto no_more_connections;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) }
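	/* Make connection point to the list head (not the first entry). */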
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) connection = list_entry(&resource->connections, struct drbd_connection, connections);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) found_connection:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) list_for_each_entry_continue_rcu(connection, &resource->connections, connections) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) if (!has_net_conf(connection))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) retcode = NO_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) goto put_result; /* only one iteration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) no_more_connections:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) if (cb->args[1] == ITERATE_RESOURCES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) for_each_resource_rcu(next_resource, &drbd_resources) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) if (next_resource == resource)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) goto found_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) /* resource was probably deleted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) found_resource:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) list_for_each_entry_continue_rcu(next_resource, &drbd_resources, resources) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) mutex_unlock(&resource->conf_update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) kref_put(&resource->kref, drbd_destroy_resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) resource = next_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) kref_get(&resource->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) cb->args[0] = (long)resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) cb->args[2] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) goto next_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) goto out; /* no more resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) put_result:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) cb->nlh->nlmsg_seq, &drbd_genl_family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) NLM_F_MULTI, DRBD_ADM_GET_CONNECTIONS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) if (!dh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) dh->ret_code = retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) dh->minor = -1U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) if (retcode == NO_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) struct net_conf *net_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) err = nla_put_drbd_cfg_context(skb, resource, connection, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) net_conf = rcu_dereference(connection->net_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) if (net_conf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) err = net_conf_to_skb(skb, net_conf, !capable(CAP_SYS_ADMIN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) connection_to_info(&connection_info, connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) err = connection_info_to_skb(skb, &connection_info, !capable(CAP_SYS_ADMIN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) err = connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) cb->args[2] = (long)connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) genlmsg_end(skb, dh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) if (resource)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) mutex_unlock(&resource->conf_update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) return skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613)
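/* Peer flags reported in peer_device_statistics, derived from the MDF_*
 * flags in our on-disk meta data. */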
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) enum mdf_peer_flag {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) MDF_PEER_CONNECTED = 1 << 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) MDF_PEER_OUTDATED = 1 << 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) MDF_PEER_FENCING = 1 << 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) MDF_PEER_FULL_SYNC = 1 << 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) static void peer_device_to_statistics(struct peer_device_statistics *s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) struct drbd_peer_device *peer_device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) struct drbd_device *device = peer_device->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) memset(s, 0, sizeof(*s));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) s->peer_dev_received = device->recv_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) s->peer_dev_sent = device->send_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) s->peer_dev_pending = atomic_read(&device->ap_pending_cnt) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) atomic_read(&device->rs_pending_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) s->peer_dev_unacked = atomic_read(&device->unacked_cnt);
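	/* Convert bitmap bits (BM_BLOCK_SIZE each) to 512-byte sectors. */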
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) s->peer_dev_out_of_sync = drbd_bm_total_weight(device) << (BM_BLOCK_SHIFT - 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) s->peer_dev_resync_failed = device->rs_failed << (BM_BLOCK_SHIFT - 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) if (get_ldev(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) struct drbd_md *md = &device->ldev->md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) spin_lock_irq(&md->uuid_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) s->peer_dev_bitmap_uuid = md->uuid[UI_BITMAP];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) spin_unlock_irq(&md->uuid_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) s->peer_dev_flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) (drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) MDF_PEER_CONNECTED : 0) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) (drbd_md_test_flag(device->ldev, MDF_CONSISTENT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) !drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) MDF_PEER_OUTDATED : 0) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) /* FIXME: MDF_PEER_FENCING? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) MDF_PEER_FULL_SYNC : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) put_ldev(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) return put_resource_in_arg0(cb, 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) struct nlattr *resource_filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) struct drbd_resource *resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) struct drbd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) struct drbd_peer_device *peer_device = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) int minor, err, retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) struct drbd_genlmsghdr *dh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) struct idr *idr_to_search;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) resource = (struct drbd_resource *)cb->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) if (!cb->args[0] && !cb->args[1]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) if (resource_filter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) retcode = ERR_RES_NOT_KNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) resource = drbd_find_resource(nla_data(resource_filter));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) if (!resource)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) goto put_result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) cb->args[0] = (long)resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) minor = cb->args[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) idr_to_search = resource ? &resource->devices : &drbd_devices;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) device = idr_find(idr_to_search, minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) if (!device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) next_device:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) minor++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) cb->args[2] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) device = idr_get_next(idr_to_search, &minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) if (!device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) if (cb->args[2]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) for_each_peer_device(peer_device, device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) if (peer_device == (struct drbd_peer_device *)cb->args[2])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) goto found_peer_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) /* peer device was probably deleted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) goto next_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) /* Make peer_device point to the list head (not the first entry). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) peer_device = list_entry(&device->peer_devices, struct drbd_peer_device, peer_devices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) found_peer_device:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) list_for_each_entry_continue_rcu(peer_device, &device->peer_devices, peer_devices) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) if (!has_net_conf(peer_device->connection))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) retcode = NO_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) goto put_result; /* only one iteration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) goto next_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) put_result:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) cb->nlh->nlmsg_seq, &drbd_genl_family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) NLM_F_MULTI, DRBD_ADM_GET_PEER_DEVICES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) if (!dh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) dh->ret_code = retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) dh->minor = -1U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) if (retcode == NO_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) struct peer_device_info peer_device_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) struct peer_device_statistics peer_device_statistics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) dh->minor = minor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) err = nla_put_drbd_cfg_context(skb, device->resource, peer_device->connection, device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) peer_device_to_info(&peer_device_info, peer_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) err = peer_device_info_to_skb(skb, &peer_device_info, !capable(CAP_SYS_ADMIN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) peer_device_to_statistics(&peer_device_statistics, peer_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) err = peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) cb->args[1] = minor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) cb->args[2] = (long)peer_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) genlmsg_end(skb, dh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) return skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) }

/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) * Return the connection of @resource if @resource has exactly one connection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) static struct drbd_connection *the_only_connection(struct drbd_resource *resource)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) struct list_head *connections = &resource->connections;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756)
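	/* Exactly one entry iff the list is non-empty and the first
	 * entry's next pointer is the list head again. */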
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) if (list_empty(connections) || connections->next->next != connections)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) return list_first_entry(&resource->connections, struct drbd_connection, connections);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) const struct sib_info *sib)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) struct drbd_resource *resource = device->resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) struct state_info *si = NULL; /* for sizeof(si->member); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) struct nlattr *nla;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) int got_ldev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) int exclude_sensitive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) * to, so we had better exclude sensitive information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) * If sib == NULL, this is drbd_adm_get_status, executed synchronously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) * in the context of the requesting user process. Exclude sensitive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) * information unless current has the CAP_SYS_ADMIN capability.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) * relies on the current implementation of netlink_dump(), which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) * executes the dump callback successively from netlink_recvmsg(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) * always in the context of the receiving process. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784)
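/* get_ldev() only succeeds, and takes a reference on the backing
* device, while a local disk is attached; the exit path below balances
* it with put_ldev(). */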
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) got_ldev = get_ldev(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) /* We still need to add the connection name and volume number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) * information here; the minor number is already in drbd_genlmsghdr. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) if (nla_put_drbd_cfg_context(skb, resource, the_only_connection(resource), device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) if (res_opts_to_skb(skb, &device->resource->res_opts, exclude_sensitive))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) if (got_ldev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) struct disk_conf *disk_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) disk_conf = rcu_dereference(device->ldev->disk_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) struct net_conf *nc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) if (nc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) err = net_conf_to_skb(skb, nc, exclude_sensitive);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) nla = nla_nest_start_noflag(skb, DRBD_NLA_STATE_INFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) if (!nla)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) nla_put_u32(skb, T_current_state, device->state.i) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) nla_put_u64_0pad(skb, T_ed_uuid, device->ed_uuid) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) nla_put_u64_0pad(skb, T_capacity, get_capacity(device->vdisk)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) nla_put_u64_0pad(skb, T_send_cnt, device->send_cnt) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) nla_put_u64_0pad(skb, T_recv_cnt, device->recv_cnt) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) nla_put_u64_0pad(skb, T_read_cnt, device->read_cnt) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) nla_put_u64_0pad(skb, T_writ_cnt, device->writ_cnt) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) nla_put_u64_0pad(skb, T_al_writ_cnt, device->al_writ_cnt) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) nla_put_u64_0pad(skb, T_bm_writ_cnt, device->bm_writ_cnt) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&device->ap_bio_cnt)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&device->ap_pending_cnt)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&device->rs_pending_cnt)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) if (got_ldev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) spin_lock_irq(&device->ldev->md.uuid_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) err = nla_put(skb, T_uuids, sizeof(si->uuids), device->ldev->md.uuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) spin_unlock_irq(&device->ldev->md.uuid_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) if (nla_put_u32(skb, T_disk_flags, device->ldev->md.flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) nla_put_u64_0pad(skb, T_bits_total, drbd_bm_bits(device)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) nla_put_u64_0pad(skb, T_bits_oos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) drbd_bm_total_weight(device)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) if (C_SYNC_SOURCE <= device->state.conn &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) C_PAUSED_SYNC_T >= device->state.conn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) if (nla_put_u64_0pad(skb, T_bits_rs_total,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) device->rs_total) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) nla_put_u64_0pad(skb, T_bits_rs_failed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) device->rs_failed))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) if (sib) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) switch (sib->sib_reason) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) case SIB_SYNC_PROGRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) case SIB_GET_STATUS_REPLY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) case SIB_STATE_CHANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) nla_put_u32(skb, T_new_state, sib->ns.i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) case SIB_HELPER_POST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) if (nla_put_u32(skb, T_helper_exit_code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) sib->helper_exit_code))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) case SIB_HELPER_PRE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) if (nla_put_string(skb, T_helper, sib->helper_name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) nla_nest_end(skb, nla);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878)
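/* "if (0)" idiom: the success path jumps over the nla_put_failure
* label, while every "goto nla_put_failure" above lands on the
* assignment and then falls through into the common cleanup below. */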
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) if (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) err = -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) if (got_ldev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) put_ldev(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) struct drbd_config_context adm_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) enum drbd_ret_code retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) if (!adm_ctx.reply_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) return retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.device, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) nlmsg_free(adm_ctx.reply_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) drbd_adm_finish(&adm_ctx, info, retcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) struct drbd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) struct drbd_genlmsghdr *dh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) struct drbd_resource *pos = (struct drbd_resource *)cb->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) struct drbd_resource *resource = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) struct drbd_resource *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) unsigned volume = cb->args[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) /* Open-coded, deferred iteration:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) * for_each_resource_safe(resource, tmp, &drbd_resources) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) * connection = "first connection of resource or undefined";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) * idr_for_each_entry(&resource->devices, device, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) * ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) * }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) * }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) * where resource is cb->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) * and i is cb->args[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) * cb->args[2] indicates if we shall loop over all resources,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) * or just dump all volumes of a single resource.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) * This may miss entries inserted after this dump started,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) * or entries deleted before they are reached.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) * We need to make sure the device won't disappear while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) * we are looking at it, and revalidate our iterators
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) * on each iteration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) /* synchronize with conn_create()/drbd_destroy_connection() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) /* revalidate iterator position */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) for_each_resource_rcu(tmp, &drbd_resources) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) if (pos == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) /* first iteration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) pos = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) resource = pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) if (tmp == pos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) resource = pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) }
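/* If pos has been removed from the list in the meantime, the loop above
* finds no match and resource stays NULL; we then fall through to "out"
* and return an empty skb, which terminates the dump. */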
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) if (resource) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) next_resource:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) device = idr_get_next(&resource->devices, &volume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) if (!device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) /* No more volumes to dump on this resource.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) * Advance resource iterator. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) pos = list_entry_rcu(resource->resources.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) struct drbd_resource, resources);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) /* Did we dump any volume of this resource yet? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) if (volume != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) /* If we reached the end of the list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) * or only a single resource dump was requested,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) * we are done. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) if (&pos->resources == &drbd_resources || cb->args[2])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) volume = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) resource = pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) goto next_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) cb->nlh->nlmsg_seq, &drbd_genl_family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) NLM_F_MULTI, DRBD_ADM_GET_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) if (!dh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) if (!device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) /* This is a connection without a single volume.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) * Surprisingly enough, it may have a network
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) * configuration. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) struct drbd_connection *connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) dh->minor = -1U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) dh->ret_code = NO_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) connection = the_only_connection(resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) if (nla_put_drbd_cfg_context(skb, resource, connection, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) goto cancel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) if (connection) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) struct net_conf *nc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) nc = rcu_dereference(connection->net_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) if (nc && net_conf_to_skb(skb, nc, 1) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) goto cancel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) D_ASSERT(device, device->vnr == volume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) D_ASSERT(device, device->resource == resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) dh->minor = device_to_minor(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) dh->ret_code = NO_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) if (nla_put_status_info(skb, device, NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) cancel:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) genlmsg_cancel(skb, dh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) genlmsg_end(skb, dh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) /* where to start the next iteration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) cb->args[0] = (long)pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) cb->args[1] = (pos == resource) ? volume + 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) /* Finding no more resources/volumes/minors results in an empty skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) * which terminates the dump. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) return skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) * Request status of all resources, or of all volumes within a single resource.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) * This is a dump, as the answer may not otherwise fit in a single reply skb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) * That means we cannot use family->attrbuf or other such members: the dump
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) * is NOT protected by the genl_lock(). During the dump we only have access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) * to the incoming skb, and need to open-code "parsing" of the nlattr payload.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) * Once things are set up properly, we call into get_one_status().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) struct nlattr *nla;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) const char *resource_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) struct drbd_resource *resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) int maxtype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) /* Is this a follow-up call? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) if (cb->args[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) /* ... of a single resource dump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) * and the resource iterator has been advanced already? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) if (cb->args[2] && cb->args[2] != cb->args[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) return 0; /* DONE. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) goto dump;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) /* First call (from netlink_dump_start). We need to figure out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) * which resource(s) the user wants us to dump. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) nlmsg_attrlen(cb->nlh, hdrlen),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) DRBD_NLA_CFG_CONTEXT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) /* No explicit context given. Dump all. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) if (!nla)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) goto dump;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) if (IS_ERR(nla))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) return PTR_ERR(nla);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) /* context given, but no name present? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) if (!nla)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) resource_name = nla_data(nla);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) if (!*resource_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) resource = drbd_find_resource(resource_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) if (!resource)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) kref_put(&resource->kref, drbd_destroy_resource); /* get_one_status() revalidates the resource */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) /* prime iterators, and set "filter" mode mark:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) * only dump this one resource. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) cb->args[0] = (long)resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) /* cb->args[1] = 0; already zeroed when the dump starts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) cb->args[2] = (long)resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) dump:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) return get_one_status(skb, cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) struct drbd_config_context adm_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) enum drbd_ret_code retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) struct timeout_parms tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) if (!adm_ctx.reply_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) return retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102)
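/* Report which of the three user-visible timeouts currently applies:
* UT_PEER_OUTDATED if the peer's disk is known to be outdated,
* UT_DEGRADED while the degraded-mode wait-for-connection timeout
* (degr-wfc-timeout) is in effect, UT_DEFAULT otherwise. */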
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) tp.timeout_type =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) adm_ctx.device->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) test_bit(USE_DEGR_WFC_T, &adm_ctx.device->flags) ? UT_DEGRADED :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) UT_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) nlmsg_free(adm_ctx.reply_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) drbd_adm_finish(&adm_ctx, info, retcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117)
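/* Online verify. Userspace typically reaches this via "drbdadm verify
* <resource>" (or the corresponding drbdsetup command), which sends a
* DRBD_ADM_START_OV request, optionally with start_ov_parms to (re)start
* the verify run at a given sector. */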
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) struct drbd_config_context adm_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) struct drbd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) enum drbd_ret_code retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) struct start_ov_parms parms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) if (!adm_ctx.reply_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) return retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) device = adm_ctx.device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) /* resume from last known position, if possible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) parms.ov_start_sector = device->ov_start_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) parms.ov_stop_sector = ULLONG_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) int err = start_ov_parms_from_attrs(&parms, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) retcode = ERR_MANDATORY_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) mutex_lock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) /* w_make_ov_request expects position to be aligned */
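/* (BM_SECT_PER_BIT is the number of 512-byte sectors covered by one
* bitmap bit; the mask below rounds the start sector down to that
* granularity.) */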
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) device->ov_stop_sector = parms.ov_stop_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) /* If there is still bitmap IO pending, e.g. previous resync or verify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) * just being finished, wait for it before requesting a new resync. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) drbd_suspend_io(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) retcode = drbd_request_state(device, NS(conn, C_VERIFY_S));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) drbd_resume_io(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) mutex_unlock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) drbd_adm_finish(&adm_ctx, info, retcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) struct drbd_config_context adm_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) struct drbd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) enum drbd_ret_code retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) int skip_initial_sync = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) struct new_c_uuid_parms args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) if (!adm_ctx.reply_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) return retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) goto out_nolock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) device = adm_ctx.device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) memset(&args, 0, sizeof(args));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) err = new_c_uuid_parms_from_attrs(&args, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) retcode = ERR_MANDATORY_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) goto out_nolock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) mutex_lock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) mutex_lock(device->state_mutex); /* Protects us against serialized state changes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) if (!get_ldev(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) retcode = ERR_NO_DISK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) /* this is "skip initial sync", assumed to be clean */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) if (device->state.conn == C_CONNECTED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) first_peer_device(device)->connection->agreed_pro_version >= 90 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) drbd_info(device, "Preparing to skip initial sync\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) skip_initial_sync = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) } else if (device->state.conn != C_STANDALONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) retcode = ERR_CONNECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) goto out_dec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) drbd_uuid_set(device, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) drbd_uuid_new_current(device); /* New current, previous to UI_BITMAP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) if (args.clear_bm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) err = drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) drbd_err(device, "Writing bitmap failed with %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) retcode = ERR_IO_MD_DISK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) if (skip_initial_sync) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) drbd_send_uuids_skip_initial_sync(first_peer_device(device));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) _drbd_uuid_set(device, UI_BITMAP, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) drbd_print_uuids(device, "cleared bitmap UUID");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) spin_lock_irq(&device->resource->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) _drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) CS_VERBOSE, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) spin_unlock_irq(&device->resource->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) drbd_md_sync(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) out_dec:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) put_ldev(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) mutex_unlock(device->state_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) mutex_unlock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) out_nolock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) drbd_adm_finish(&adm_ctx, info, retcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) static enum drbd_ret_code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) drbd_check_resource_name(struct drbd_config_context *adm_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) const char *name = adm_ctx->resource_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) if (!name || !name[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) drbd_msg_put_info(adm_ctx->reply_skb, "resource name missing");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) return ERR_MANDATORY_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) /* if we want to use these in sysfs/configfs/debugfs some day,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) * we must not allow slashes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) if (strchr(name, '/')) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) drbd_msg_put_info(adm_ctx->reply_skb, "invalid resource name");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) return ERR_INVALID_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) return NO_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) static void resource_to_info(struct resource_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) struct drbd_resource *resource)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) info->res_role = conn_highest_role(first_connection(resource));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) info->res_susp = resource->susp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) info->res_susp_nod = resource->susp_nod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) info->res_susp_fen = resource->susp_fen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) struct drbd_connection *connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) struct drbd_config_context adm_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) enum drbd_ret_code retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) struct res_opts res_opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) retcode = drbd_adm_prepare(&adm_ctx, skb, info, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) if (!adm_ctx.reply_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) return retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) set_res_opts_defaults(&res_opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) err = res_opts_from_attrs(&res_opts, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) if (err && err != -ENOMSG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) retcode = ERR_MANDATORY_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) retcode = drbd_check_resource_name(&adm_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) if (adm_ctx.resource) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) retcode = ERR_INVALID_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) drbd_msg_put_info(adm_ctx.reply_skb, "resource exists");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) /* else: still NO_ERROR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) /* not yet safe for genl_family.parallel_ops */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) mutex_lock(&resources_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) connection = conn_create(adm_ctx.resource_name, &res_opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) mutex_unlock(&resources_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) if (connection) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) struct resource_info resource_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) mutex_lock(&notification_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) resource_to_info(&resource_info, connection->resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) notify_resource_state(NULL, 0, connection->resource,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) &resource_info, NOTIFY_CREATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) mutex_unlock(&notification_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) retcode = ERR_NOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) drbd_adm_finish(&adm_ctx, info, retcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) static void device_to_info(struct device_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) struct drbd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) info->dev_disk_state = device->state.disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) struct drbd_config_context adm_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) struct drbd_genlmsghdr *dh = info->userhdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) enum drbd_ret_code retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) if (!adm_ctx.reply_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) return retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) if (dh->minor > MINORMASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) drbd_msg_put_info(adm_ctx.reply_skb, "requested minor out of range");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) retcode = ERR_INVALID_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) if (adm_ctx.volume > DRBD_VOLUME_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) drbd_msg_put_info(adm_ctx.reply_skb, "requested volume id out of range");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) retcode = ERR_INVALID_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) /* drbd_adm_prepare made sure already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) * that first_peer_device(device)->connection and device->vnr match the request. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) if (adm_ctx.device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) retcode = ERR_MINOR_OR_VOLUME_EXISTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) /* else: still NO_ERROR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) mutex_lock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) retcode = drbd_create_device(&adm_ctx, dh->minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) if (retcode == NO_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) struct drbd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) struct drbd_peer_device *peer_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) struct device_info info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) unsigned int peer_devices = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) enum drbd_notification_type flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) device = minor_to_device(dh->minor);
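/* Count the peer devices that have a network configuration: all but
* the last notification below are flagged NOTIFY_CONTINUES; the flag
* is dropped once the counter reaches zero. */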
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) for_each_peer_device(peer_device, device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) if (!has_net_conf(peer_device->connection))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) peer_devices++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) device_to_info(&info, device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) mutex_lock(&notification_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) notify_device_state(NULL, 0, device, &info, NOTIFY_CREATE | flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) for_each_peer_device(peer_device, device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) struct peer_device_info peer_device_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) if (!has_net_conf(peer_device->connection))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) peer_device_to_info(&peer_device_info, peer_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) notify_peer_device_state(NULL, 0, peer_device, &peer_device_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) NOTIFY_CREATE | flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) mutex_unlock(&notification_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) mutex_unlock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) drbd_adm_finish(&adm_ctx, info, retcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) static enum drbd_ret_code adm_del_minor(struct drbd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) struct drbd_peer_device *peer_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) if (device->state.disk == D_DISKLESS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) /* No need to also require device->state.conn == C_STANDALONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) * we may want to delete a minor from a live replication group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) device->state.role == R_SECONDARY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) struct drbd_connection *connection =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) first_connection(device->resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) _drbd_request_state(device, NS(conn, C_WF_REPORT_PARAMS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) CS_VERBOSE + CS_WAIT_COMPLETE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) /* If the state engine hasn't stopped the sender thread yet, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) * need to flush the sender work queue before generating the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) * DESTROY events here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) if (get_t_state(&connection->worker) == RUNNING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) drbd_flush_workqueue(&connection->sender_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) mutex_lock(&notification_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) for_each_peer_device(peer_device, device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) if (!has_net_conf(peer_device->connection))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) notify_peer_device_state(NULL, 0, peer_device, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) NOTIFY_DESTROY | NOTIFY_CONTINUES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) notify_device_state(NULL, 0, device, NULL, NOTIFY_DESTROY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) mutex_unlock(&notification_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) drbd_delete_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) return NO_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) return ERR_MINOR_CONFIGURED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) struct drbd_config_context adm_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) enum drbd_ret_code retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) if (!adm_ctx.reply_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) return retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) mutex_lock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) retcode = adm_del_minor(adm_ctx.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) mutex_unlock(&adm_ctx.resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) drbd_adm_finish(&adm_ctx, info, retcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) static int adm_del_resource(struct drbd_resource *resource)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) struct drbd_connection *connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) for_each_connection(connection, resource) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) if (connection->cstate > C_STANDALONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) return ERR_NET_CONFIGURED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) if (!idr_is_empty(&resource->devices))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) return ERR_RES_IN_USE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) /* The state engine has stopped the sender thread, so we don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) * need to flush the sender work queue before generating the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) * DESTROY event here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) mutex_lock(&notification_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) notify_resource_state(NULL, 0, resource, NULL, NOTIFY_DESTROY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) mutex_unlock(&notification_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) mutex_lock(&resources_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) list_del_rcu(&resource->resources);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) mutex_unlock(&resources_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) /* Make sure all threads have actually stopped: state handling only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) * does drbd_thread_stop_nowait(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) list_for_each_entry(connection, &resource->connections, connections)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) drbd_thread_stop(&connection->worker);
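/* list_del_rcu() above only unlinked the resource; wait for concurrent
* RCU readers (such as the get_one_status() iteration) to finish before
* the resource is actually freed. */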
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) drbd_free_resource(resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) return NO_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485)
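/* "down" in a single request: demote all volumes to Secondary, disconnect
* all connections, detach and delete all volumes, then delete the resource
* itself. */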
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) struct drbd_config_context adm_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) struct drbd_resource *resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) struct drbd_connection *connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) struct drbd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) int retcode; /* enum drbd_ret_code or enum drbd_state_rv, respectively */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) if (!adm_ctx.reply_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) return retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) resource = adm_ctx.resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) mutex_lock(&resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) /* demote */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) for_each_connection(connection, resource) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) struct drbd_peer_device *peer_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) idr_for_each_entry(&connection->peer_devices, peer_device, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) retcode = drbd_set_role(peer_device->device, R_SECONDARY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) if (retcode < SS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) drbd_msg_put_info(adm_ctx.reply_skb, "failed to demote");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) retcode = conn_try_disconnect(connection, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) if (retcode < SS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) drbd_msg_put_info(adm_ctx.reply_skb, "failed to disconnect");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) /* detach */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) idr_for_each_entry(&resource->devices, device, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) retcode = adm_detach(device, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) drbd_msg_put_info(adm_ctx.reply_skb, "failed to detach");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) /* delete volumes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) idr_for_each_entry(&resource->devices, device, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) retcode = adm_del_minor(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) if (retcode != NO_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) /* "cannot happen" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) drbd_msg_put_info(adm_ctx.reply_skb, "failed to delete volume");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) retcode = adm_del_resource(resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) mutex_unlock(&resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) finish:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) drbd_adm_finish(&adm_ctx, info, retcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548)
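/*
 * drbd_adm_del_resource() - delete a single, already quiesced resource
 *
 * Unlike drbd_adm_down(), no demote/disconnect/detach is attempted
 * here; adm_del_resource() is expected to refuse a resource that still
 * has connections established or volumes configured.
 */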
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) struct drbd_config_context adm_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) struct drbd_resource *resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) enum drbd_ret_code retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) if (!adm_ctx.reply_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) return retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) if (retcode != NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) resource = adm_ctx.resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) mutex_lock(&resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) retcode = adm_del_resource(resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) mutex_unlock(&resource->adm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) finish:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) drbd_adm_finish(&adm_ctx, info, retcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569)
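/*
 * drbd_bcast_event() - multicast a "state info broadcast" to userspace
 *
 * Builds a DRBD_EVENT message describing @device's current state (as
 * selected by @sib) and multicasts it to the generic netlink "events"
 * group.  An -ESRCH result from the broadcast only means that nobody is
 * currently listening, so it is not reported as an error.
 *
 * A minimal userspace listener, sketched with libnl-3 and assuming the
 * family name "drbd" and multicast group "events" as defined with the
 * genetlink family in drbd_genl.h; print_event() is a hypothetical
 * callback, error handling omitted for brevity:
 *
 *	#include <netlink/netlink.h>
 *	#include <netlink/genl/genl.h>
 *	#include <netlink/genl/ctrl.h>
 *
 *	static int print_event(struct nl_msg *msg, void *arg)
 *	{
 *		// parse the genlmsg header and nlattrs here
 *		return NL_OK;
 *	}
 *
 *	int main(void)
 *	{
 *		struct nl_sock *sk = nl_socket_alloc();
 *
 *		genl_connect(sk);
 *		nl_socket_add_membership(sk,
 *			genl_ctrl_resolve_grp(sk, "drbd", "events"));
 *		nl_socket_disable_seq_check(sk); // broadcasts carry their own seq
 *		nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM,
 *				    print_event, NULL);
 *		for (;;)
 *			nl_recvmsgs_default(sk);
 *	}
 */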
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) struct sk_buff *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) struct drbd_genlmsghdr *d_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) unsigned seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) int err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) seq = atomic_inc_return(&drbd_genl_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) if (!msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) err = -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) if (!d_out) /* cannot happen, but anyway. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) d_out->minor = device_to_minor(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) d_out->ret_code = NO_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) if (nla_put_status_info(msg, device, sib))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) genlmsg_end(msg, d_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) err = drbd_genl_multicast_events(msg, GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) /* msg has been consumed or freed in netlink_broadcast() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) if (err && err != -ESRCH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) nlmsg_free(msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) drbd_err(device, "Error %d while broadcasting event. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) "Event seq:%u sib_reason:%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) err, seq, sib->sib_reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) static int nla_put_notification_header(struct sk_buff *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) enum drbd_notification_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) struct drbd_notification_header nh = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) .nh_type = type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) return drbd_notification_header_to_skb(msg, &nh, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616)
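/*
 * The notify_*_state() functions below share one calling convention:
 *
 *  - skb != NULL: we are inside a netlink dump (see get_initial_state());
 *    append one message to the dump skb using the caller's @seq.
 *  - skb == NULL: a live state change; allocate a fresh skb, take the
 *    next notify_genl_seq, and multicast the message to the "events"
 *    group (-ESRCH again meaning "no listeners").
 *
 * Every message is assembled in the same order: genlmsg header, config
 * context, notification header, the object's info (omitted for
 * NOTIFY_DESTROY, where only the context matters), and finally its
 * statistics.
 */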
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) void notify_resource_state(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) unsigned int seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) struct drbd_resource *resource,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) struct resource_info *resource_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) enum drbd_notification_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) struct resource_statistics resource_statistics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) struct drbd_genlmsghdr *dh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) bool multicast = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) seq = atomic_inc_return(&notify_genl_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) multicast = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) err = -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_RESOURCE_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) if (!dh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) dh->minor = -1U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) dh->ret_code = NO_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) if (nla_put_drbd_cfg_context(skb, resource, NULL, NULL) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) nla_put_notification_header(skb, type) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) resource_info_to_skb(skb, resource_info, true)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) resource_statistics.res_stat_write_ordering = resource->write_ordering;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) genlmsg_end(skb, dh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) if (multicast) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) /* skb has been consumed or freed in netlink_broadcast() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) if (err && err != -ESRCH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) nlmsg_free(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) err, seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) void notify_device_state(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) unsigned int seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) struct drbd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) struct device_info *device_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) enum drbd_notification_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) struct device_statistics device_statistics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) struct drbd_genlmsghdr *dh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) bool multicast = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) seq = atomic_inc_return(&notify_genl_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) multicast = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) err = -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_DEVICE_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) if (!dh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) dh->minor = device->minor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) dh->ret_code = NO_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) if (nla_put_drbd_cfg_context(skb, device->resource, NULL, device) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) nla_put_notification_header(skb, type) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) device_info_to_skb(skb, device_info, true)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) device_to_statistics(&device_statistics, device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) genlmsg_end(skb, dh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) if (multicast) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) /* skb has been consumed or freed in netlink_broadcast() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) if (err && err != -ESRCH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) nlmsg_free(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) err, seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) void notify_connection_state(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) unsigned int seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) struct drbd_connection *connection,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) struct connection_info *connection_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) enum drbd_notification_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) struct connection_statistics connection_statistics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) struct drbd_genlmsghdr *dh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) bool multicast = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) seq = atomic_inc_return(&notify_genl_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) multicast = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) err = -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_CONNECTION_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) if (!dh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) dh->minor = -1U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) dh->ret_code = NO_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) if (nla_put_drbd_cfg_context(skb, connection->resource, connection, NULL) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) nla_put_notification_header(skb, type) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) connection_info_to_skb(skb, connection_info, true)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) genlmsg_end(skb, dh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) if (multicast) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) /* skb has been consumed or freed in netlink_broadcast() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) if (err && err != -ESRCH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) nlmsg_free(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) err, seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) void notify_peer_device_state(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) unsigned int seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) struct drbd_peer_device *peer_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) struct peer_device_info *peer_device_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) enum drbd_notification_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) struct peer_device_statistics peer_device_statistics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) struct drbd_resource *resource = peer_device->device->resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) struct drbd_genlmsghdr *dh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) bool multicast = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) seq = atomic_inc_return(&notify_genl_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) multicast = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) err = -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_PEER_DEVICE_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) if (!dh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) dh->minor = -1U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) dh->ret_code = NO_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) if (nla_put_drbd_cfg_context(skb, resource, peer_device->connection, peer_device->device) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) nla_put_notification_header(skb, type) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) peer_device_info_to_skb(skb, peer_device_info, true)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) peer_device_to_statistics(&peer_device_statistics, peer_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) genlmsg_end(skb, dh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) if (multicast) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) /* skb has been consumed or freed in netlink_broadcast() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804) if (err && err != -ESRCH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) nlmsg_free(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) drbd_err(peer_device, "Error %d while broadcasting event. Event seq:%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) err, seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815)
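/*
 * notify_helper() - broadcast that a userspace helper ran
 *
 * Emitted around helper invocations (e.g. fence-peer), once when the
 * helper is called and once with its exit @status.  Unlike the
 * notify_*_state() family this always multicasts, and it does so under
 * notification_mutex to keep helper events ordered with respect to
 * state change notifications.
 */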
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) void notify_helper(enum drbd_notification_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) struct drbd_device *device, struct drbd_connection *connection,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) const char *name, int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) struct drbd_resource *resource = device ? device->resource : connection->resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) struct drbd_helper_info helper_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822) unsigned int seq = atomic_inc_return(&notify_genl_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) struct sk_buff *skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) struct drbd_genlmsghdr *dh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) strlcpy(helper_info.helper_name, name, sizeof(helper_info.helper_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828) helper_info.helper_name_len = min(strlen(name), sizeof(helper_info.helper_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) helper_info.helper_status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836) err = -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_HELPER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) if (!dh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) dh->minor = device ? device->minor : -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841) dh->ret_code = NO_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) mutex_lock(&notification_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843) if (nla_put_drbd_cfg_context(skb, resource, connection, device) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) nla_put_notification_header(skb, type) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) drbd_helper_info_to_skb(skb, &helper_info, true))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) goto unlock_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) genlmsg_end(skb, dh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848) err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849) skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) /* skb has been consumed or freed in netlink_broadcast() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) if (err && err != -ESRCH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) goto unlock_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853) mutex_unlock(&notification_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) unlock_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) mutex_unlock(&notification_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859) nlmsg_free(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860) drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) err, seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863)
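/*
 * A bare DRBD_INITIAL_STATE_DONE message with a NOTIFY_EXISTS header
 * terminates an initial-state dump: it tells the listener that all
 * existing objects have been reported and only live events follow.
 */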
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) struct drbd_genlmsghdr *dh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869) err = -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_INITIAL_STATE_DONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871) if (!dh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) dh->minor = -1U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874) dh->ret_code = NO_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875) if (nla_put_notification_header(skb, NOTIFY_EXISTS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877) genlmsg_end(skb, dh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881) nlmsg_free(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882) pr_err("Error %d sending event. Event seq:%u\n", err, seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885) static void free_state_changes(struct list_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887) while (!list_empty(list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) struct drbd_state_change *state_change =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) list_first_entry(list, struct drbd_state_change, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890) list_del(&state_change->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) forget_state_change(state_change);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894)
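/*
 * One state change expands into one notification for the resource, one
 * per connection, one per device, and one per peer device (each
 * device/connection pair).  For example, a resource with two
 * connections and three volumes produces 1 + 2 + 3 + 2*3 = 12
 * notifications.
 */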
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895) static unsigned int notifications_for_state_change(struct drbd_state_change *state_change)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897) return 1 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) state_change->n_connections +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899) state_change->n_devices +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900) state_change->n_devices * state_change->n_connections;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902)
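/*
 * get_initial_state() - emit one NOTIFY_EXISTS message per dump step
 *
 * The dump cursor lives in the netlink callback scratch space:
 *
 *   cb->args[0]  current drbd_state_change being walked
 *   cb->args[2]  netlink sequence number of the dump request
 *   cb->args[3]  notifications_for_state_change() of the current entry
 *   cb->args[4]  next notification index within the current entry
 *   cb->args[5]  messages still to send (including the final
 *                DRBD_INITIAL_STATE_DONE), plus one
 *
 * Within one entry the order is: resource, connections, devices, peer
 * devices; every message of an entry except its last carries
 * NOTIFY_CONTINUES.
 */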
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) struct drbd_state_change *state_change = (struct drbd_state_change *)cb->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906) unsigned int seq = cb->args[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907) unsigned int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908) enum drbd_notification_type flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) /* There is no need for taking notification_mutex here: it doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911) matter if the initial state events mix with later state change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912) events; we can always tell the events apart by the NOTIFY_EXISTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913) flag. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915) cb->args[5]--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) if (cb->args[5] == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917) notify_initial_state_done(skb, seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920) n = cb->args[4]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921) if (cb->args[4] < cb->args[3])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922) flags |= NOTIFY_CONTINUES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923) if (n < 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924) notify_resource_state_change(skb, seq, state_change->resource,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925) NOTIFY_EXISTS | flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928) n--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929) if (n < state_change->n_connections) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930) notify_connection_state_change(skb, seq, &state_change->connections[n],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931) NOTIFY_EXISTS | flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934) n -= state_change->n_connections;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935) if (n < state_change->n_devices) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) notify_device_state_change(skb, seq, &state_change->devices[n],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937) NOTIFY_EXISTS | flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940) n -= state_change->n_devices;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941) if (n < state_change->n_devices * state_change->n_connections) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942) notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943) NOTIFY_EXISTS | flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948) if (cb->args[4] == cb->args[3]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4949) struct drbd_state_change *next_state_change =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4950) list_entry(state_change->list.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4951) struct drbd_state_change, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4952) cb->args[0] = (long)next_state_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4953) cb->args[3] = notifications_for_state_change(next_state_change);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4954) cb->args[4] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4956) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957) return skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4959)
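/*
 * drbd_adm_get_initial_state() - netlink dump of all existing objects
 *
 * On the first call (cb->args[5] == 0) this snapshots every resource
 * with remember_old_state() under resources_mutex, chains the snapshots
 * into a list, and primes the cb->args cursor described above.  Later
 * calls stream the snapshots through get_initial_state(); the final
 * call (cb->args[5] == 1) frees whatever snapshots remain.  Taking the
 * snapshot up front keeps the dump self-consistent while resources keep
 * changing, and listeners can tell dumped events from live ones by the
 * NOTIFY_EXISTS flag.
 */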
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4960) int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4961) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4962) struct drbd_resource *resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4963) LIST_HEAD(head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4965) if (cb->args[5] >= 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4966) if (cb->args[5] > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4967) return get_initial_state(skb, cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4968) if (cb->args[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4969) struct drbd_state_change *state_change =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4970) (struct drbd_state_change *)cb->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4972) /* connect list to head */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4973) list_add(&head, &state_change->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4974) free_state_changes(&head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4976) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4979) cb->args[5] = 2; /* number of iterations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4980) mutex_lock(&resources_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4981) for_each_resource(resource, &drbd_resources) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4982) struct drbd_state_change *state_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4984) state_change = remember_old_state(resource, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4985) if (!state_change) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4986) if (!list_empty(&head))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4987) free_state_changes(&head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4988) mutex_unlock(&resources_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4989) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4991) copy_old_to_new_state_change(state_change);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4992) list_add_tail(&state_change->list, &head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4993) cb->args[5] += notifications_for_state_change(state_change);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4995) mutex_unlock(&resources_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4997) if (!list_empty(&head)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4998) struct drbd_state_change *state_change =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4999) list_entry(head.next, struct drbd_state_change, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5000) cb->args[0] = (long)state_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5001) cb->args[3] = notifications_for_state_change(state_change);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5002) list_del(&head); /* detach list from head */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5005) cb->args[2] = cb->nlh->nlmsg_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5006) return get_initial_state(skb, cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5007) }