// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015, SUSE
 */


#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/dlm.h>
#include <linux/sched.h>
#include <linux/raid/md_p.h>
#include "md.h"
#include "md-bitmap.h"
#include "md-cluster.h"

#define LVB_SIZE 64
#define NEW_DEV_TIMEOUT 5000

struct dlm_lock_resource {
	dlm_lockspace_t *ls;
	struct dlm_lksb lksb;
	char *name; /* lock name. */
	uint32_t flags; /* flags to pass to dlm_lock() */
	wait_queue_head_t sync_locking; /* wait queue for synchronized locking */
	bool sync_locking_done;
	void (*bast)(void *arg, int mode); /* blocking AST function pointer */
	struct mddev *mddev; /* pointing back to mddev. */
	int mode;
};

struct resync_info {
	__le64 lo;
	__le64 hi;
};

/* md_cluster_info flags */
#define MD_CLUSTER_WAITING_FOR_NEWDISK 1
#define MD_CLUSTER_SUSPEND_READ_BALANCING 2
#define MD_CLUSTER_BEGIN_JOIN_CLUSTER 3

/* Lock the send communication. This is done through
 * bit manipulation as opposed to a mutex in order to
 * accommodate lock and hold. See next comment.
 */
#define MD_CLUSTER_SEND_LOCK 4
/* Cluster operations (such as adding a disk) must lock the
 * communication channel so that extra operations (e.g. updating
 * the metadata) can be performed while no other operation is
 * allowed on the MD. The token needs to be locked and held until
 * the operation completes with md_update_sb(), which eventually
 * releases the lock.
 */
#define MD_CLUSTER_SEND_LOCKED_ALREADY 5
/* We should only receive messages after the node has joined the cluster
 * and set up all the related info such as the bitmap and personality */
#define MD_CLUSTER_ALREADY_IN_CLUSTER 6
#define MD_CLUSTER_PENDING_RECV_EVENT 7
#define MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD 8

struct md_cluster_info {
	struct mddev *mddev; /* the md device which md_cluster_info belongs to */
	/* dlm lock space and resources for clustered raid. */
	dlm_lockspace_t *lockspace;
	int slot_number;
	struct completion completion;
	struct mutex recv_mutex;
	struct dlm_lock_resource *bitmap_lockres;
	struct dlm_lock_resource **other_bitmap_lockres;
	struct dlm_lock_resource *resync_lockres;
	struct list_head suspend_list;

	spinlock_t suspend_lock;
	/* record the region in which writes should be suspended */
	sector_t suspend_lo;
	sector_t suspend_hi;
	int suspend_from; /* the slot which broadcast suspend_lo/hi */

	struct md_thread *recovery_thread;
	unsigned long recovery_map;
	/* communication lock resources */
	struct dlm_lock_resource *ack_lockres;
	struct dlm_lock_resource *message_lockres;
	struct dlm_lock_resource *token_lockres;
	struct dlm_lock_resource *no_new_dev_lockres;
	struct md_thread *recv_thread;
	struct completion newdisk_completion;
	wait_queue_head_t wait;
	unsigned long state;
	/* record the region in RESYNCING message */
	sector_t sync_low;
	sector_t sync_hi;
};

enum msg_type {
	METADATA_UPDATED = 0,
	RESYNCING,
	NEWDISK,
	REMOVE,
	RE_ADD,
	BITMAP_NEEDS_SYNC,
	CHANGE_CAPACITY,
	BITMAP_RESIZE,
};

struct cluster_msg {
	__le32 type;
	__le32 slot;
	/* TODO: Unionize this for smaller footprint */
	__le64 low;
	__le64 high;
	char uuid[16];
	__le32 raid_slot;
};

static void sync_ast(void *arg)
{
	struct dlm_lock_resource *res;

	res = arg;
	res->sync_locking_done = true;
	wake_up(&res->sync_locking);
}

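/*
 * Issue a DLM lock request in the given mode and wait for the AST
 * (sync_ast) to signal completion, then report the DLM status.
 */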
static int dlm_lock_sync(struct dlm_lock_resource *res, int mode)
{
	int ret = 0;

	ret = dlm_lock(res->ls, mode, &res->lksb,
			res->flags, res->name, strlen(res->name),
			0, sync_ast, res, res->bast);
	if (ret)
		return ret;
	wait_event(res->sync_locking, res->sync_locking_done);
	res->sync_locking_done = false;
	if (res->lksb.sb_status == 0)
		res->mode = mode;
	return res->lksb.sb_status;
}

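/*
 * "Unlock" by converting the lock down to NL, so the resource stays
 * known to the DLM while no longer blocking other nodes.
 */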
static int dlm_unlock_sync(struct dlm_lock_resource *res)
{
	return dlm_lock_sync(res, DLM_LOCK_NL);
}

/*
 * A variation of dlm_lock_sync, which allows the lock request to
 * be interrupted
 */
static int dlm_lock_sync_interruptible(struct dlm_lock_resource *res, int mode,
				       struct mddev *mddev)
{
	int ret = 0;

	ret = dlm_lock(res->ls, mode, &res->lksb,
			res->flags, res->name, strlen(res->name),
			0, sync_ast, res, res->bast);
	if (ret)
		return ret;

	wait_event(res->sync_locking, res->sync_locking_done
				      || kthread_should_stop()
				      || test_bit(MD_CLOSING, &mddev->flags));
	if (!res->sync_locking_done) {
		/*
		 * the convert queue contains the lock request when the request
		 * is interrupted, and sync_ast could still run, so we need to
		 * cancel the request and reset the completion
		 */
		ret = dlm_unlock(res->ls, res->lksb.sb_lkid, DLM_LKF_CANCEL,
			&res->lksb, res);
		res->sync_locking_done = false;
		if (unlikely(ret != 0))
			pr_info("failed to cancel previous lock request %s return %d\n",
				res->name, ret);
		return -EPERM;
	} else
		res->sync_locking_done = false;
	if (res->lksb.sb_status == 0)
		res->mode = mode;
	return res->lksb.sb_status;
}

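/*
 * Allocate and initialise a DLM lock resource: copy the name, optionally
 * allocate an LVB, and take an initial NL lock so that later requests can
 * use DLM_LKF_CONVERT.
 */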
static struct dlm_lock_resource *lockres_init(struct mddev *mddev,
		char *name, void (*bastfn)(void *arg, int mode), int with_lvb)
{
	struct dlm_lock_resource *res = NULL;
	int ret, namelen;
	struct md_cluster_info *cinfo = mddev->cluster_info;

	res = kzalloc(sizeof(struct dlm_lock_resource), GFP_KERNEL);
	if (!res)
		return NULL;
	init_waitqueue_head(&res->sync_locking);
	res->sync_locking_done = false;
	res->ls = cinfo->lockspace;
	res->mddev = mddev;
	res->mode = DLM_LOCK_IV;
	namelen = strlen(name);
	res->name = kzalloc(namelen + 1, GFP_KERNEL);
	if (!res->name) {
		pr_err("md-cluster: Unable to allocate resource name for resource %s\n", name);
		goto out_err;
	}
	strscpy(res->name, name, namelen + 1);
	if (with_lvb) {
		res->lksb.sb_lvbptr = kzalloc(LVB_SIZE, GFP_KERNEL);
		if (!res->lksb.sb_lvbptr) {
			pr_err("md-cluster: Unable to allocate LVB for resource %s\n", name);
			goto out_err;
		}
		res->flags = DLM_LKF_VALBLK;
	}

	if (bastfn)
		res->bast = bastfn;

	res->flags |= DLM_LKF_EXPEDITE;

	ret = dlm_lock_sync(res, DLM_LOCK_NL);
	if (ret) {
		pr_err("md-cluster: Unable to lock NL on new lock resource %s\n", name);
		goto out_err;
	}
	res->flags &= ~DLM_LKF_EXPEDITE;
	res->flags |= DLM_LKF_CONVERT;

	return res;
out_err:
	kfree(res->lksb.sb_lvbptr);
	kfree(res->name);
	kfree(res);
	return NULL;
}

static void lockres_free(struct dlm_lock_resource *res)
{
	int ret = 0;

	if (!res)
		return;

	/*
	 * use the FORCEUNLOCK flag, so we can unlock even if the lock is on
	 * the waiting or convert queue
	 */
	ret = dlm_unlock(res->ls, res->lksb.sb_lkid, DLM_LKF_FORCEUNLOCK,
		&res->lksb, res);
	if (unlikely(ret != 0))
		pr_err("failed to unlock %s return %d\n", res->name, ret);
	else
		wait_event(res->sync_locking, res->sync_locking_done);

	kfree(res->name);
	kfree(res->lksb.sb_lvbptr);
	kfree(res);
}

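/* Pack the resync range [lo, hi] into the lock resource's LVB */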
static void add_resync_info(struct dlm_lock_resource *lockres,
			    sector_t lo, sector_t hi)
{
	struct resync_info *ri;

	ri = (struct resync_info *)lockres->lksb.sb_lvbptr;
	ri->lo = cpu_to_le64(lo);
	ri->hi = cpu_to_le64(hi);
}

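/*
 * Read the resync range another node published in the lock resource's
 * LVB and, if one is active, record it as the local suspend region.
 * Returns 1 if a resync range was found, 0 otherwise.
 */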
static int read_resync_info(struct mddev *mddev,
			    struct dlm_lock_resource *lockres)
{
	struct resync_info ri;
	struct md_cluster_info *cinfo = mddev->cluster_info;
	int ret = 0;

	dlm_lock_sync(lockres, DLM_LOCK_CR);
	memcpy(&ri, lockres->lksb.sb_lvbptr, sizeof(struct resync_info));
	if (le64_to_cpu(ri.hi) > 0) {
		cinfo->suspend_hi = le64_to_cpu(ri.hi);
		cinfo->suspend_lo = le64_to_cpu(ri.lo);
		ret = 1;
	}
	dlm_unlock_sync(lockres);
	return ret;
}

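/*
 * Recovery thread: for each slot marked in recovery_map, take the failed
 * node's bitmap lock, merge its bitmap into ours, clear the suspend
 * region and kick off any resync or reshape that is now needed.
 */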
static void recover_bitmaps(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct md_cluster_info *cinfo = mddev->cluster_info;
	struct dlm_lock_resource *bm_lockres;
	char str[64];
	int slot, ret;
	sector_t lo, hi;

	while (cinfo->recovery_map) {
		slot = fls64((u64)cinfo->recovery_map) - 1;

		snprintf(str, 64, "bitmap%04d", slot);
		bm_lockres = lockres_init(mddev, str, NULL, 1);
		if (!bm_lockres) {
			pr_err("md-cluster: Cannot initialize bitmaps\n");
			goto clear_bit;
		}

		ret = dlm_lock_sync_interruptible(bm_lockres, DLM_LOCK_PW, mddev);
		if (ret) {
			pr_err("md-cluster: Could not DLM lock %s: %d\n",
					str, ret);
			goto clear_bit;
		}
		ret = md_bitmap_copy_from_slot(mddev, slot, &lo, &hi, true);
		if (ret) {
			pr_err("md-cluster: Could not copy data from bitmap %d\n", slot);
			goto clear_bit;
		}

		/* Clear suspend_area associated with the bitmap */
		spin_lock_irq(&cinfo->suspend_lock);
		cinfo->suspend_hi = 0;
		cinfo->suspend_lo = 0;
		cinfo->suspend_from = -1;
		spin_unlock_irq(&cinfo->suspend_lock);

		/* Kick off a reshape if needed */
		if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
		    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
		    mddev->reshape_position != MaxSector)
			md_wakeup_thread(mddev->sync_thread);

		if (hi > 0) {
			if (lo < mddev->recovery_cp)
				mddev->recovery_cp = lo;
			/* wake up the thread to continue resync in case resync
			 * is not finished */
			if (mddev->recovery_cp != MaxSector) {
				/*
				 * clear the REMOTE flag since we will launch
				 * the resync thread in the current node.
				 */
				clear_bit(MD_RESYNCING_REMOTE,
					  &mddev->recovery);
				set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
				md_wakeup_thread(mddev->thread);
			}
		}
clear_bit:
		lockres_free(bm_lockres);
		clear_bit(slot, &cinfo->recovery_map);
	}
}

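/* DLM recovery is starting: suspend read balancing until it completes */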
static void recover_prep(void *arg)
{
	struct mddev *mddev = arg;
	struct md_cluster_info *cinfo = mddev->cluster_info;
	set_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
}

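/*
 * Mark a slot for bitmap recovery and make sure the recovery thread
 * is running to process it.
 */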
static void __recover_slot(struct mddev *mddev, int slot)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;

	set_bit(slot, &cinfo->recovery_map);
	if (!cinfo->recovery_thread) {
		cinfo->recovery_thread = md_register_thread(recover_bitmaps,
				mddev, "recover");
		if (!cinfo->recovery_thread) {
			pr_warn("md-cluster: Could not create recovery thread\n");
			return;
		}
	}
	md_wakeup_thread(cinfo->recovery_thread);
}

static void recover_slot(void *arg, struct dlm_slot *slot)
{
	struct mddev *mddev = arg;
	struct md_cluster_info *cinfo = mddev->cluster_info;

	pr_info("md-cluster: %s Node %d/%d down. My slot: %d. Initiating recovery.\n",
			mddev->bitmap_info.cluster_name,
			slot->nodeid, slot->slot,
			cinfo->slot_number);
	/* subtract one since DLM slot numbers start at one while the slot
	 * numbers of cluster-md begin with 0 */
	__recover_slot(mddev, slot->slot - 1);
}

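/*
 * Called by the DLM once lockspace recovery is complete: record our slot
 * number, complete the join if one is in progress, and re-enable read
 * balancing.
 */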
static void recover_done(void *arg, struct dlm_slot *slots,
		int num_slots, int our_slot,
		uint32_t generation)
{
	struct mddev *mddev = arg;
	struct md_cluster_info *cinfo = mddev->cluster_info;

	cinfo->slot_number = our_slot;
	/* the completion only needs to be completed when a node joins the
	 * cluster, it doesn't need to run during another node's failure */
	if (test_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state)) {
		complete(&cinfo->completion);
		clear_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state);
	}
	clear_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
}

/* These ops are called when a node joins the cluster, and perform lock
 * recovery if a node failure occurs */
static const struct dlm_lockspace_ops md_ls_ops = {
	.recover_prep = recover_prep,
	.recover_slot = recover_slot,
	.recover_done = recover_done,
};

/*
 * The BAST function for the ack lock resource
 * This function wakes up the receive thread in
 * order to receive and process the message.
 */
static void ack_bast(void *arg, int mode)
{
	struct dlm_lock_resource *res = arg;
	struct md_cluster_info *cinfo = res->mddev->cluster_info;

	if (mode == DLM_LOCK_EX) {
		if (test_bit(MD_CLUSTER_ALREADY_IN_CLUSTER, &cinfo->state))
			md_wakeup_thread(cinfo->recv_thread);
		else
			set_bit(MD_CLUSTER_PENDING_RECV_EVENT, &cinfo->state);
	}
}

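/* Clear the suspend region while the array is quiesced */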
static void remove_suspend_info(struct mddev *mddev, int slot)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;
	mddev->pers->quiesce(mddev, 1);
	spin_lock_irq(&cinfo->suspend_lock);
	cinfo->suspend_hi = 0;
	cinfo->suspend_lo = 0;
	spin_unlock_irq(&cinfo->suspend_lock);
	mddev->pers->quiesce(mddev, 0);
}

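/*
 * Handle a RESYNCING message: if hi == 0 the remote resync has finished,
 * so drop the suspend region; otherwise sync our bitmap with the remote
 * node's progress and record [lo, hi] as the region to suspend writes in.
 */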
static void process_suspend_info(struct mddev *mddev,
		int slot, sector_t lo, sector_t hi)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;
	struct mdp_superblock_1 *sb = NULL;
	struct md_rdev *rdev;

	if (!hi) {
		/*
		 * clear the REMOTE flag since resync or recovery is finished
		 * in the remote node.
		 */
		clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery);
		remove_suspend_info(mddev, slot);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		return;
	}

	rdev_for_each(rdev, mddev)
		if (rdev->raid_disk > -1 && !test_bit(Faulty, &rdev->flags)) {
			sb = page_address(rdev->sb_page);
			break;
		}

	/*
	 * The bitmaps are not the same for different nodes.
	 * If RESYNCING is happening in one node, then
	 * the node which received the RESYNCING message
	 * will probably perform a resync of the region
	 * [lo, hi] again, so we can reduce resync time
	 * a lot if we can ensure that the bitmaps among
	 * different nodes match up well.
	 *
	 * sync_low/hi is used to record the region which
	 * arrived in the previous RESYNCING message.
	 *
	 * Call md_bitmap_sync_with_cluster to clear NEEDED_MASK
	 * and set RESYNC_MASK since the resync thread is running
	 * in another node, so we don't need to do the resync
	 * again for the same section.
	 *
	 * Skip md_bitmap_sync_with_cluster in case a reshape is
	 * happening, because the reshaping region is small and
	 * we don't want to trigger lots of WARNs.
	 */
	if (sb && !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE))
		md_bitmap_sync_with_cluster(mddev, cinfo->sync_low,
					    cinfo->sync_hi, lo, hi);
	cinfo->sync_low = lo;
	cinfo->sync_hi = hi;

	mddev->pers->quiesce(mddev, 1);
	spin_lock_irq(&cinfo->suspend_lock);
	cinfo->suspend_from = slot;
	cinfo->suspend_lo = lo;
	cinfo->suspend_hi = hi;
	spin_unlock_irq(&cinfo->suspend_lock);
	mddev->pers->quiesce(mddev, 0);
}

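/*
 * Handle a NEWDISK message: notify userspace via a uevent carrying the
 * device UUID and raid slot, then wait (with a timeout) for the new
 * device to be added locally.
 */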
static void process_add_new_disk(struct mddev *mddev, struct cluster_msg *cmsg)
{
	char disk_uuid[64];
	struct md_cluster_info *cinfo = mddev->cluster_info;
	char event_name[] = "EVENT=ADD_DEVICE";
	char raid_slot[16];
	char *envp[] = {event_name, disk_uuid, raid_slot, NULL};
	int len;

	len = snprintf(disk_uuid, 64, "DEVICE_UUID=");
	sprintf(disk_uuid + len, "%pU", cmsg->uuid);
	snprintf(raid_slot, 16, "RAID_DISK=%d", le32_to_cpu(cmsg->raid_slot));
	pr_info("%s:%d Sending kobject change with %s and %s\n", __func__, __LINE__, disk_uuid, raid_slot);
	init_completion(&cinfo->newdisk_completion);
	set_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state);
	kobject_uevent_env(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE, envp);
	wait_for_completion_timeout(&cinfo->newdisk_completion,
			NEW_DEV_TIMEOUT);
	clear_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state);
}

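/*
 * Handle a METADATA_UPDATED message: reload the superblock from the
 * indicated device. Take reconfig_mutex via mddev_trylock() if possible,
 * or proceed once MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD is set, to avoid
 * deadlocking against a local sender that already holds the mutex.
 */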
static void process_metadata_update(struct mddev *mddev, struct cluster_msg *msg)
{
	int got_lock = 0;
	struct md_cluster_info *cinfo = mddev->cluster_info;
	mddev->good_device_nr = le32_to_cpu(msg->raid_slot);

	dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR);
	wait_event(mddev->thread->wqueue,
		   (got_lock = mddev_trylock(mddev)) ||
		   test_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state));
	md_reload_sb(mddev, mddev->good_device_nr);
	if (got_lock)
		mddev_unlock(mddev);
}

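/*
 * Handle a REMOVE message: flag the named rdev for removal and let the
 * md thread carry it out.
 */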
static void process_remove_disk(struct mddev *mddev, struct cluster_msg *msg)
{
	struct md_rdev *rdev;

	rcu_read_lock();
	rdev = md_find_rdev_nr_rcu(mddev, le32_to_cpu(msg->raid_slot));
	if (rdev) {
		set_bit(ClusterRemove, &rdev->flags);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
	}
	else
		pr_warn("%s: %d Could not find disk(%d) to REMOVE\n",
			__func__, __LINE__, le32_to_cpu(msg->raid_slot));
	rcu_read_unlock();
}

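/*
 * Handle a RE_ADD message: clear the Faulty flag on the named rdev so
 * it can rejoin the array.
 */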
static void process_readd_disk(struct mddev *mddev, struct cluster_msg *msg)
{
	struct md_rdev *rdev;

	rcu_read_lock();
	rdev = md_find_rdev_nr_rcu(mddev, le32_to_cpu(msg->raid_slot));
	if (rdev && test_bit(Faulty, &rdev->flags))
		clear_bit(Faulty, &rdev->flags);
	else
		pr_warn("%s: %d Could not find disk(%d) which is faulty\n",
			__func__, __LINE__, le32_to_cpu(msg->raid_slot));
	rcu_read_unlock();
}

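/* Dispatch a received cluster message to its handler */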
static int process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
{
	int ret = 0;

	if (WARN(mddev->cluster_info->slot_number - 1 == le32_to_cpu(msg->slot),
		"node %d received its own msg\n", le32_to_cpu(msg->slot)))
		return -1;
	switch (le32_to_cpu(msg->type)) {
	case METADATA_UPDATED:
		process_metadata_update(mddev, msg);
		break;
	case CHANGE_CAPACITY:
		set_capacity(mddev->gendisk, mddev->array_sectors);
		revalidate_disk_size(mddev->gendisk, true);
		break;
	case RESYNCING:
		set_bit(MD_RESYNCING_REMOTE, &mddev->recovery);
		process_suspend_info(mddev, le32_to_cpu(msg->slot),
				     le64_to_cpu(msg->low),
				     le64_to_cpu(msg->high));
		break;
	case NEWDISK:
		process_add_new_disk(mddev, msg);
		break;
	case REMOVE:
		process_remove_disk(mddev, msg);
		break;
	case RE_ADD:
		process_readd_disk(mddev, msg);
		break;
	case BITMAP_NEEDS_SYNC:
		__recover_slot(mddev, le32_to_cpu(msg->slot));
		break;
	case BITMAP_RESIZE:
		if (le64_to_cpu(msg->high) != mddev->pers->size(mddev, 0, 0))
			ret = md_bitmap_resize(mddev->bitmap,
					       le64_to_cpu(msg->high), 0, 0);
		break;
	default:
		ret = -1;
		pr_warn("%s:%d Received unknown message from %d\n",
			__func__, __LINE__, le32_to_cpu(msg->slot));
	}
	return ret;
}

/*
 * thread for receiving message
 */
static void recv_daemon(struct md_thread *thread)
{
	struct md_cluster_info *cinfo = thread->mddev->cluster_info;
	struct dlm_lock_resource *ack_lockres = cinfo->ack_lockres;
	struct dlm_lock_resource *message_lockres = cinfo->message_lockres;
	struct cluster_msg msg;
	int ret;

	mutex_lock(&cinfo->recv_mutex);
	/*get CR on Message*/
	if (dlm_lock_sync(message_lockres, DLM_LOCK_CR)) {
		pr_err("md/raid1: failed to get CR on MESSAGE\n");
		mutex_unlock(&cinfo->recv_mutex);
		return;
	}

	/* read lvb and wake up thread to process this message_lockres */
	memcpy(&msg, message_lockres->lksb.sb_lvbptr, sizeof(struct cluster_msg));
	ret = process_recvd_msg(thread->mddev, &msg);
	if (ret)
		goto out;

	/*release CR on ack_lockres*/
	ret = dlm_unlock_sync(ack_lockres);
	if (unlikely(ret != 0))
		pr_info("unlock ack failed return %d\n", ret);
	/*up-convert to PR on message_lockres*/
	ret = dlm_lock_sync(message_lockres, DLM_LOCK_PR);
	if (unlikely(ret != 0))
		pr_info("lock PR on msg failed return %d\n", ret);
	/*get CR on ack_lockres again*/
	ret = dlm_lock_sync(ack_lockres, DLM_LOCK_CR);
	if (unlikely(ret != 0))
		pr_info("lock CR on ack failed return %d\n", ret);
out:
	/*release CR on message_lockres*/
	ret = dlm_unlock_sync(message_lockres);
	if (unlikely(ret != 0))
		pr_info("unlock msg failed return %d\n", ret);
	mutex_unlock(&cinfo->recv_mutex);
}

/* lock_token()
 * Takes the lock on the TOKEN lock resource so no other
 * node can communicate while the operation is underway.
 */
static int lock_token(struct md_cluster_info *cinfo)
{
	int error;

	error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
	if (error) {
		pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n",
				__func__, __LINE__, error);
	} else {
		/* Lock the receive sequence */
		mutex_lock(&cinfo->recv_mutex);
	}
	return error;
}

/* lock_comm()
 * Sets the MD_CLUSTER_SEND_LOCK bit to lock the send channel.
 */
static int lock_comm(struct md_cluster_info *cinfo, bool mddev_locked)
{
	int rv, set_bit = 0;
	struct mddev *mddev = cinfo->mddev;

	/*
	 * If the resync thread runs after the raid1d thread, then
	 * process_metadata_update could not continue while raid1d held
	 * reconfig_mutex (and raid1d is blocked since another node already
	 * got EX on Token and is waiting for EX on Ack), so let resync wake
	 * up the thread in case the flag is set.
	 */
	if (mddev_locked && !test_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD,
				      &cinfo->state)) {
		rv = test_and_set_bit_lock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD,
					   &cinfo->state);
		WARN_ON_ONCE(rv);
		md_wakeup_thread(mddev->thread);
		set_bit = 1;
	}

	wait_event(cinfo->wait,
		   !test_and_set_bit(MD_CLUSTER_SEND_LOCK, &cinfo->state));
	rv = lock_token(cinfo);
	if (set_bit)
		clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
	return rv;
}

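/* unlock_comm()
 * Releases the TOKEN lock and the send channel so another node can send.
 */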
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) static void unlock_comm(struct md_cluster_info *cinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) WARN_ON(cinfo->token_lockres->mode != DLM_LOCK_EX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) mutex_unlock(&cinfo->recv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) dlm_unlock_sync(cinfo->token_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) clear_bit(MD_CLUSTER_SEND_LOCK, &cinfo->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) wake_up(&cinfo->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) /* __sendmsg()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) * This function performs the actual sending of the message. This function is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) * usually called after performing the encompassing operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) * The function:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) * 1. Grabs the message lockresource in EX mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) * 2. Copies the message to the message LVB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) * 3. Downconverts message lockresource to CW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) * 4. Upconverts ack lock resource from CR to EX. This forces the BAST on other nodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) * and the other nodes read the message. The thread will wait here until all other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) * nodes have released ack lock resource.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) * 5. Downconvert ack lockresource to CR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) static int __sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) int slot = cinfo->slot_number - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) cmsg->slot = cpu_to_le32(slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) /*get EX on Message*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) error = dlm_lock_sync(cinfo->message_lockres, DLM_LOCK_EX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) pr_err("md-cluster: failed to get EX on MESSAGE (%d)\n", error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) goto failed_message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) memcpy(cinfo->message_lockres->lksb.sb_lvbptr, (void *)cmsg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) sizeof(struct cluster_msg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) /*down-convert EX to CW on Message*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) error = dlm_lock_sync(cinfo->message_lockres, DLM_LOCK_CW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) pr_err("md-cluster: failed to convert EX to CW on MESSAGE(%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) goto failed_ack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) /*up-convert CR to EX on Ack*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) error = dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_EX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) pr_err("md-cluster: failed to convert CR to EX on ACK(%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) goto failed_ack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) /*down-convert EX to CR on Ack*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) error = dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) pr_err("md-cluster: failed to convert EX to CR on ACK(%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) goto failed_ack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) failed_ack:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) error = dlm_unlock_sync(cinfo->message_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) if (unlikely(error != 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) pr_err("md-cluster: failed convert to NL on MESSAGE(%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) /* in case the message can't be released due to some reason */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) goto failed_ack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) failed_message:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
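/*
 * Lock the communication channel, broadcast the message via __sendmsg()
 * and unlock again.  @mddev_locked tells lock_comm() whether the caller
 * already holds the mddev lock.
 */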
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) static int sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) bool mddev_locked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) ret = lock_comm(cinfo, mddev_locked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) ret = __sendmsg(cinfo, cmsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) unlock_comm(cinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
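/*
 * Walk every other node's bitmap lock resource.  If the lock is busy
 * (-EAGAIN) that node is active, so just record any resync range it
 * advertises in its LVB.  Otherwise copy the slot's bitmap and, if it
 * is dirty, pull recovery_cp back and request recovery.
 */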
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) static int gather_all_resync_info(struct mddev *mddev, int total_slots)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) struct md_cluster_info *cinfo = mddev->cluster_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) int i, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) struct dlm_lock_resource *bm_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) char str[64];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) sector_t lo, hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) for (i = 0; i < total_slots; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) memset(str, '\0', 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) snprintf(str, 64, "bitmap%04d", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) bm_lockres = lockres_init(mddev, str, NULL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) if (!bm_lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) if (i == (cinfo->slot_number - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) lockres_free(bm_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) bm_lockres->flags |= DLM_LKF_NOQUEUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) if (ret == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) if (read_resync_info(mddev, bm_lockres)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) pr_info("%s:%d Resync[%llu..%llu] in progress on %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) __func__, __LINE__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) (unsigned long long) cinfo->suspend_lo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) (unsigned long long) cinfo->suspend_hi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) cinfo->suspend_from = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) lockres_free(bm_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) lockres_free(bm_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) /* Read the disk bitmap sb and check if it needs recovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) ret = md_bitmap_copy_from_slot(mddev, i, &lo, &hi, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) pr_warn("md-cluster: Could not gather bitmaps from slot %d\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) lockres_free(bm_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if ((hi > 0) && (lo < mddev->recovery_cp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) mddev->recovery_cp = lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) md_check_recovery(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) lockres_free(bm_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
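/*
 * Join the cluster: create a DLM lockspace named after the array uuid,
 * wait for our slot number, start the receive thread and initialize the
 * message, token, ack, no-new-dev, bitmap and resync lock resources.
 */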
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) static int join(struct mddev *mddev, int nodes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) struct md_cluster_info *cinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) int ret, ops_rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) char str[64];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) cinfo = kzalloc(sizeof(struct md_cluster_info), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) if (!cinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) INIT_LIST_HEAD(&cinfo->suspend_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) spin_lock_init(&cinfo->suspend_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) init_completion(&cinfo->completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) set_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) init_waitqueue_head(&cinfo->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) mutex_init(&cinfo->recv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) mddev->cluster_info = cinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) cinfo->mddev = mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) memset(str, 0, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) sprintf(str, "%pU", mddev->uuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) ret = dlm_new_lockspace(str, mddev->bitmap_info.cluster_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) DLM_LSFL_FS, LVB_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) &md_ls_ops, mddev, &ops_rv, &cinfo->lockspace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) wait_for_completion(&cinfo->completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (nodes < cinfo->slot_number) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) pr_err("md-cluster: Slot allotted(%d) is greater than available slots(%d).\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) cinfo->slot_number, nodes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) ret = -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) /* Initiate the communication resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) cinfo->recv_thread = md_register_thread(recv_daemon, mddev, "cluster_recv");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (!cinfo->recv_thread) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) pr_err("md-cluster: cannot allocate memory for recv_thread!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) cinfo->message_lockres = lockres_init(mddev, "message", NULL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (!cinfo->message_lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) cinfo->token_lockres = lockres_init(mddev, "token", NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) if (!cinfo->token_lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) cinfo->no_new_dev_lockres = lockres_init(mddev, "no-new-dev", NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (!cinfo->no_new_dev_lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) ret = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) pr_err("md-cluster: can't join cluster to avoid lock issue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) cinfo->ack_lockres = lockres_init(mddev, "ack", ack_bast, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) if (!cinfo->ack_lockres) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) /* get sync CR lock on ACK. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) if (dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) pr_err("md-cluster: failed to get a sync CR lock on ACK!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) dlm_unlock_sync(cinfo->token_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) /* get sync CR lock on no-new-dev. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) if (dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) pr_err("md-cluster: failed to get a sync CR lock on no-new-dev!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) pr_info("md-cluster: Joined cluster %s slot %d\n", str, cinfo->slot_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) snprintf(str, 64, "bitmap%04d", cinfo->slot_number - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) cinfo->bitmap_lockres = lockres_init(mddev, str, NULL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (!cinfo->bitmap_lockres) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) pr_err("Failed to get bitmap lock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) cinfo->resync_lockres = lockres_init(mddev, "resync", NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (!cinfo->resync_lockres) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) set_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) md_unregister_thread(&cinfo->recovery_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) md_unregister_thread(&cinfo->recv_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) lockres_free(cinfo->message_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) lockres_free(cinfo->token_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) lockres_free(cinfo->ack_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) lockres_free(cinfo->no_new_dev_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) lockres_free(cinfo->resync_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) lockres_free(cinfo->bitmap_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (cinfo->lockspace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) dlm_release_lockspace(cinfo->lockspace, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) mddev->cluster_info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) kfree(cinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) static void load_bitmaps(struct mddev *mddev, int total_slots)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) struct md_cluster_info *cinfo = mddev->cluster_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) /* load all the nodes' bitmap info for resync */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) if (gather_all_resync_info(mddev, total_slots))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) pr_err("md-cluster: failed to gather all resync infos\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) set_bit(MD_CLUSTER_ALREADY_IN_CLUSTER, &cinfo->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) /* wake up recv thread in case something needs to be handled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) if (test_and_clear_bit(MD_CLUSTER_PENDING_RECV_EVENT, &cinfo->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) md_wakeup_thread(cinfo->recv_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
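/*
 * Tell the other nodes that this node's bitmap still needs syncing;
 * called from leave() when we go away with a dirty bitmap.
 */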
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) static void resync_bitmap(struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) struct md_cluster_info *cinfo = mddev->cluster_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) struct cluster_msg cmsg = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) cmsg.type = cpu_to_le32(BITMAP_NEEDS_SYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) err = sendmsg(cinfo, &cmsg, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) pr_err("%s:%d: failed to send BITMAP_NEEDS_SYNC message (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) __func__, __LINE__, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) static void unlock_all_bitmaps(struct mddev *mddev);
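/*
 * Leave the cluster: notify the other nodes if our bitmap is still dirty
 * (or a reshape was interrupted), stop the helper threads, free all lock
 * resources and release the DLM lockspace.
 */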
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) static int leave(struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) struct md_cluster_info *cinfo = mddev->cluster_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) if (!cinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * A BITMAP_NEEDS_SYNC message should be sent when a node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) * is leaving the cluster with a dirty bitmap; note that we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) * can only deliver it when the dlm connection is available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) * Also, we should send BITMAP_NEEDS_SYNC message in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) * case reshaping is interrupted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) if ((cinfo->slot_number > 0 && mddev->recovery_cp != MaxSector) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) (mddev->reshape_position != MaxSector &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) test_bit(MD_CLOSING, &mddev->flags)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) resync_bitmap(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) set_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) md_unregister_thread(&cinfo->recovery_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) md_unregister_thread(&cinfo->recv_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) lockres_free(cinfo->message_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) lockres_free(cinfo->token_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) lockres_free(cinfo->ack_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) lockres_free(cinfo->no_new_dev_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) lockres_free(cinfo->resync_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) lockres_free(cinfo->bitmap_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) unlock_all_bitmaps(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) dlm_release_lockspace(cinfo->lockspace, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) kfree(cinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) /* slot_number(): Returns the MD slot number to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) * DLM starts the slot numbers from 1, whereas cluster-md
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) * wants the number to start from zero, so we subtract one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) static int slot_number(struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) struct md_cluster_info *cinfo = mddev->cluster_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) return cinfo->slot_number - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * Check if the communication is already locked, else lock the communication
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) * If it is already locked, token is in EX mode, and hence lock_token()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * should not be called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) static int metadata_update_start(struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) struct md_cluster_info *cinfo = mddev->cluster_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) * metadata_update_start is always called with the protection of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) * reconfig_mutex, so set MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) ret = test_and_set_bit_lock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) &cinfo->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) WARN_ON_ONCE(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) md_wakeup_thread(mddev->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) wait_event(cinfo->wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) !test_and_set_bit(MD_CLUSTER_SEND_LOCK, &cinfo->state) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) test_and_clear_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) /* If token is already locked, return 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) if (cinfo->token_lockres->mode == DLM_LOCK_EX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) ret = lock_token(cinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
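/*
 * Broadcast METADATA_UPDATED (carrying a good active device number) and
 * drop the communication lock taken in metadata_update_start().
 */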
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) static int metadata_update_finish(struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) struct md_cluster_info *cinfo = mddev->cluster_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) struct cluster_msg cmsg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) struct md_rdev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) int raid_slot = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) memset(&cmsg, 0, sizeof(cmsg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) cmsg.type = cpu_to_le32(METADATA_UPDATED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) /* Pick up a good active device number to send.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) rdev_for_each(rdev, mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) if (rdev->raid_disk > -1 && !test_bit(Faulty, &rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) raid_slot = rdev->desc_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) if (raid_slot >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) cmsg.raid_slot = cpu_to_le32(raid_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) ret = __sendmsg(cinfo, &cmsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) pr_warn("md-cluster: No good device id found to send\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) clear_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) unlock_comm(cinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) static void metadata_update_cancel(struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) struct md_cluster_info *cinfo = mddev->cluster_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) clear_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) unlock_comm(cinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
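/*
 * Ask every node to resize its bitmap to @size by broadcasting a
 * BITMAP_RESIZE message.
 */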
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) static int update_bitmap_size(struct mddev *mddev, sector_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) struct md_cluster_info *cinfo = mddev->cluster_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) struct cluster_msg cmsg = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) cmsg.type = cpu_to_le32(BITMAP_RESIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) cmsg.high = cpu_to_le64(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) ret = sendmsg(cinfo, &cmsg, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) pr_err("%s:%d: failed to send BITMAP_RESIZE message (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) __func__, __LINE__, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
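/*
 * Resize the bitmaps of all nodes before a reshape: first ask everyone
 * to grow via BITMAP_RESIZE, then update the page count of every
 * unoccupied slot's bitmap.  On failure the old size is restored.
 */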
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) struct bitmap_counts *counts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) char str[64];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) struct dlm_lock_resource *bm_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) struct bitmap *bitmap = mddev->bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) unsigned long my_pages = bitmap->counts.pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) int i, rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) * We need to ensure all the nodes can grow to a larger
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) * bitmap size before we start the reshape.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) rv = update_bitmap_size(mddev, newsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) for (i = 0; i < mddev->bitmap_info.nodes; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (i == md_cluster_ops->slot_number(mddev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) bitmap = get_bitmap_from_slot(mddev, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) if (IS_ERR(bitmap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) pr_err("can't get bitmap from slot %d\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) bitmap = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) counts = &bitmap->counts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) * If we can hold the bitmap lock of one node, then the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) * slot is not occupied, so update the pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) snprintf(str, 64, "bitmap%04d", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) bm_lockres = lockres_init(mddev, str, NULL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) if (!bm_lockres) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) pr_err("Cannot initialize %s lock\n", str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) bm_lockres->flags |= DLM_LKF_NOQUEUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) rv = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) if (!rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) counts->pages = my_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) lockres_free(bm_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) if (my_pages != counts->pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) * Let's revert the bitmap size if one node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) * can't resize its bitmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) md_bitmap_free(bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) md_bitmap_free(bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) update_bitmap_size(mddev, oldsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * return 0 if all the bitmaps have the same sync_size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) static int cluster_check_sync_size(struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) int i, rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) bitmap_super_t *sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) unsigned long my_sync_size, sync_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) int node_num = mddev->bitmap_info.nodes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) int current_slot = md_cluster_ops->slot_number(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) struct bitmap *bitmap = mddev->bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) char str[64];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) struct dlm_lock_resource *bm_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) sb = kmap_atomic(bitmap->storage.sb_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) my_sync_size = sb->sync_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) kunmap_atomic(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) for (i = 0; i < node_num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) if (i == current_slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) bitmap = get_bitmap_from_slot(mddev, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) if (IS_ERR(bitmap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) pr_err("can't get bitmap from slot %d\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) * If we can hold the bitmap lock of one node, then the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) * slot is not occupied, so update the sb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) snprintf(str, 64, "bitmap%04d", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) bm_lockres = lockres_init(mddev, str, NULL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) if (!bm_lockres) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) pr_err("md-cluster: Cannot initialize %s\n", str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) md_bitmap_free(bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) bm_lockres->flags |= DLM_LKF_NOQUEUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) rv = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) if (!rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) md_bitmap_update_sb(bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) lockres_free(bm_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) sb = kmap_atomic(bitmap->storage.sb_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) if (sync_size == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) sync_size = sb->sync_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) else if (sync_size != sb->sync_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) kunmap_atomic(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) md_bitmap_free(bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) kunmap_atomic(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) md_bitmap_free(bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) return (my_sync_size == sync_size) ? 0 : -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) * Updating the size for a clustered raid is a little more complex; we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) * perform it in the following steps:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) * 1. hold the token lock and update the superblock on the initiator node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) * 2. send a METADATA_UPDATED msg to the other nodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) * 3. The initiator node continues to check each bitmap's sync_size; if all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) *    bitmaps have the same value of sync_size, then we can set the capacity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) *    and let the other nodes perform it as well. If one node can't update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) *    sync_size accordingly, we need to revert to the previous value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) static void update_size(struct mddev *mddev, sector_t old_dev_sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) struct md_cluster_info *cinfo = mddev->cluster_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) struct cluster_msg cmsg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) struct md_rdev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) int raid_slot = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) md_update_sb(mddev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) if (lock_comm(cinfo, 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) pr_err("%s: lock_comm failed\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) memset(&cmsg, 0, sizeof(cmsg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) cmsg.type = cpu_to_le32(METADATA_UPDATED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) rdev_for_each(rdev, mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) if (rdev->raid_disk >= 0 && !test_bit(Faulty, &rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) raid_slot = rdev->desc_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) if (raid_slot >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) cmsg.raid_slot = cpu_to_le32(raid_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) * We can only change the capacity after all the nodes can do it,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) * so we need to wait until the other nodes have received the msg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) * and handled the change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) ret = __sendmsg(cinfo, &cmsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) pr_err("%s:%d: failed to send METADATA_UPDATED msg\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) __func__, __LINE__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) unlock_comm(cinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) pr_err("md-cluster: No good device id found to send\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) unlock_comm(cinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) * check the sync_size from the other nodes' bitmaps; if sync_size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) * has already been updated on the other nodes as expected, send an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) * empty metadata msg to permit the change of capacity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) if (cluster_check_sync_size(mddev) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) memset(&cmsg, 0, sizeof(cmsg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) cmsg.type = cpu_to_le32(CHANGE_CAPACITY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) ret = __sendmsg(cinfo, &cmsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) pr_err("%s:%d: failed to send CHANGE_CAPACITY msg\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) __func__, __LINE__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) set_capacity(mddev->gendisk, mddev->array_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) revalidate_disk_size(mddev->gendisk, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) /* revert to previous sectors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) ret = mddev->pers->resize(mddev, old_dev_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) revalidate_disk_size(mddev->gendisk, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) ret = __sendmsg(cinfo, &cmsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) pr_err("%s:%d: failed to send METADATA_UPDATED msg\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) __func__, __LINE__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) unlock_comm(cinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
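/*
 * Take EX on the resync lock resource so that only one node resyncs
 * at a time.
 */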
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) static int resync_start(struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) struct md_cluster_info *cinfo = mddev->cluster_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) return dlm_lock_sync_interruptible(cinfo->resync_lockres, DLM_LOCK_EX, mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) static void resync_info_get(struct mddev *mddev, sector_t *lo, sector_t *hi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) struct md_cluster_info *cinfo = mddev->cluster_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) spin_lock_irq(&cinfo->suspend_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) *lo = cinfo->suspend_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) *hi = cinfo->suspend_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) spin_unlock_irq(&cinfo->suspend_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
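/*
 * Publish the range we are currently resyncing: store [lo, hi] in the
 * bitmap lock's LVB and broadcast a RESYNCING message.  lo == hi == 0
 * means the resync has finished.
 */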
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) struct md_cluster_info *cinfo = mddev->cluster_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) struct resync_info ri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) struct cluster_msg cmsg = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) /* do not send zero again if we have sent it before */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) if (hi == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) memcpy(&ri, cinfo->bitmap_lockres->lksb.sb_lvbptr, sizeof(struct resync_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) if (le64_to_cpu(ri.hi) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) add_resync_info(cinfo->bitmap_lockres, lo, hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) /* Re-acquire the lock to refresh LVB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) cmsg.type = cpu_to_le32(RESYNCING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) cmsg.low = cpu_to_le64(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) cmsg.high = cpu_to_le64(hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) * mddev_lock is held if resync_info_update is called from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) * resync_finish (md_reap_sync_thread -> resync_finish)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) if (lo == 0 && hi == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) return sendmsg(cinfo, &cmsg, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) return sendmsg(cinfo, &cmsg, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) static int resync_finish(struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) struct md_cluster_info *cinfo = mddev->cluster_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) * If the resync thread is interrupted, we can't say resync is finished;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) * another node will launch a resync thread to continue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) if (!test_bit(MD_CLOSING, &mddev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) ret = resync_info_update(mddev, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) dlm_unlock_sync(cinfo->resync_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
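/*
 * Return 1 if [lo, hi] overlaps the range another node is currently
 * resyncing (or, for reads, if read balancing is suspended).
 */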
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) static int area_resyncing(struct mddev *mddev, int direction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) sector_t lo, sector_t hi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) struct md_cluster_info *cinfo = mddev->cluster_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) if ((direction == READ) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) test_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) spin_lock_irq(&cinfo->suspend_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) if (hi > cinfo->suspend_lo && lo < cinfo->suspend_hi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) spin_unlock_irq(&cinfo->suspend_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) /* add_new_disk() - initiates a disk add
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) * However, if this fails before the superblock is written by md_update_sb(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) * add_new_disk_cancel() must be called to release the token lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) struct md_cluster_info *cinfo = mddev->cluster_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) struct cluster_msg cmsg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) char *uuid = sb->device_uuid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) memset(&cmsg, 0, sizeof(cmsg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) cmsg.type = cpu_to_le32(NEWDISK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) memcpy(cmsg.uuid, uuid, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) if (lock_comm(cinfo, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) ret = __sendmsg(cinfo, &cmsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) unlock_comm(cinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) /* Some node does not "see" the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) if (ret == -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) unlock_comm(cinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) /* Since MD_CHANGE_DEVS will be set in add_bound_rdev which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) * will run soon after add_new_disk, the below path will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) * invoked:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) * md_wakeup_thread(mddev->thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) * -> conf->thread (raid1d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) * -> md_check_recovery -> md_update_sb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) * -> metadata_update_start/finish
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) * MD_CLUSTER_SEND_LOCKED_ALREADY will be cleared eventually.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) * For other failure cases, metadata_update_cancel and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) * add_new_disk_cancel also clear the bit below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) set_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) wake_up(&cinfo->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) static void add_new_disk_cancel(struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) struct md_cluster_info *cinfo = mddev->cluster_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) clear_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) unlock_comm(cinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
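/*
 * Confirm (or reject) a NEWDISK request from another node.  On a positive
 * ack we drop our hold on the no-new-dev lock so the initiator can take
 * it in EX mode, then complete the newdisk wait.
 */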
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) static int new_disk_ack(struct mddev *mddev, bool ack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) struct md_cluster_info *cinfo = mddev->cluster_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) if (!test_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) pr_warn("md-cluster(%s): Spurious cluster confirmation\n", mdname(mddev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) if (ack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) dlm_unlock_sync(cinfo->no_new_dev_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) complete(&cinfo->newdisk_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) static int remove_disk(struct mddev *mddev, struct md_rdev *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) struct cluster_msg cmsg = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) struct md_cluster_info *cinfo = mddev->cluster_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) cmsg.type = cpu_to_le32(REMOVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) return sendmsg(cinfo, &cmsg, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
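/*
 * Try to take PW on every other node's bitmap lock (NOQUEUE).  Returns 1
 * if all of them could be acquired, -1 if at least one is held by an
 * active node, and 0 or -ENOMEM on allocation failure.
 */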
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) static int lock_all_bitmaps(struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) int slot, my_slot, ret, held = 1, i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) char str[64];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) struct md_cluster_info *cinfo = mddev->cluster_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) cinfo->other_bitmap_lockres =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) kcalloc(mddev->bitmap_info.nodes - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) sizeof(struct dlm_lock_resource *), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) if (!cinfo->other_bitmap_lockres) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) pr_err("md: can't alloc mem for other bitmap locks\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) my_slot = slot_number(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) for (slot = 0; slot < mddev->bitmap_info.nodes; slot++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) if (slot == my_slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) memset(str, '\0', 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) snprintf(str, 64, "bitmap%04d", slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) cinfo->other_bitmap_lockres[i] = lockres_init(mddev, str, NULL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) if (!cinfo->other_bitmap_lockres[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) cinfo->other_bitmap_lockres[i]->flags |= DLM_LKF_NOQUEUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) ret = dlm_lock_sync(cinfo->other_bitmap_lockres[i], DLM_LOCK_PW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) held = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) return held;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) static void unlock_all_bitmaps(struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) struct md_cluster_info *cinfo = mddev->cluster_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) /* release the other nodes' bitmap locks if they exist */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) if (cinfo->other_bitmap_lockres) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) for (i = 0; i < mddev->bitmap_info.nodes - 1; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) if (cinfo->other_bitmap_lockres[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) lockres_free(cinfo->other_bitmap_lockres[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) kfree(cinfo->other_bitmap_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) cinfo->other_bitmap_lockres = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
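/*
 * Called when a device is re-added: tell the other nodes via RE_ADD,
 * then copy every other node's bitmap from its slot and pull recovery_cp
 * back to cover their dirty ranges.
 */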
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) static int gather_bitmaps(struct md_rdev *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) int sn, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) sector_t lo, hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) struct cluster_msg cmsg = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) struct mddev *mddev = rdev->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) struct md_cluster_info *cinfo = mddev->cluster_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) cmsg.type = cpu_to_le32(RE_ADD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) err = sendmsg(cinfo, &cmsg, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) for (sn = 0; sn < mddev->bitmap_info.nodes; sn++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) if (sn == (cinfo->slot_number - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) err = md_bitmap_copy_from_slot(mddev, sn, &lo, &hi, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) pr_warn("md-cluster: Could not gather bitmaps from slot %d\n", sn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) if ((hi > 0) && (lo < mddev->recovery_cp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) mddev->recovery_cp = lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) static struct md_cluster_operations cluster_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) .join = join,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) .leave = leave,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) .slot_number = slot_number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) .resync_start = resync_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) .resync_finish = resync_finish,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) .resync_info_update = resync_info_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) .resync_info_get = resync_info_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) .metadata_update_start = metadata_update_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) .metadata_update_finish = metadata_update_finish,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) .metadata_update_cancel = metadata_update_cancel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) .area_resyncing = area_resyncing,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) .add_new_disk = add_new_disk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) .add_new_disk_cancel = add_new_disk_cancel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) .new_disk_ack = new_disk_ack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) .remove_disk = remove_disk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) .load_bitmaps = load_bitmaps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) .gather_bitmaps = gather_bitmaps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) .resize_bitmaps = resize_bitmaps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) .lock_all_bitmaps = lock_all_bitmaps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) .unlock_all_bitmaps = unlock_all_bitmaps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) .update_size = update_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) static int __init cluster_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) pr_warn("md-cluster: support raid1 and raid10 (limited support)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) pr_info("Registering Cluster MD functions\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) register_md_cluster_operations(&cluster_ops, THIS_MODULE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) static void cluster_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) unregister_md_cluster_operations();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) module_init(cluster_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) module_exit(cluster_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) MODULE_AUTHOR("SUSE");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) MODULE_DESCRIPTION("Clustering support for MD");