/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include "dm-rq.h"
#include "dm-bio-record.h"
#include "dm-path-selector.h"
#include "dm-uevent.h"

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
#define QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT 0

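/*
 * Timeout (in seconds) after which queued I/O is failed if no path has
 * become available; 0 means never time out (see enable_nopath_timeout()).
 */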
static unsigned long queue_if_no_path_timeout_secs = QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT;

/* Path properties */
struct pgpath {
	struct list_head list;

	struct priority_group *pg;	/* Owning PG */
	unsigned fail_count;		/* Cumulative failure count */

	struct dm_path path;
	struct delayed_work activate_path;

	bool is_active:1;		/* Path status */
};

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
	struct list_head list;

	struct multipath *m;		/* Owning multipath instance */
	struct path_selector ps;

	unsigned pg_num;		/* Reference number */
	unsigned nr_pgpaths;		/* Number of paths in PG */
	struct list_head pgpaths;

	bool bypassed:1;		/* Temporarily bypass this PG? */
};

/* Multipath context */
struct multipath {
	unsigned long flags;		/* Multipath state flags */

	spinlock_t lock;
	enum dm_queue_mode queue_mode;

	struct pgpath *current_pgpath;
	struct priority_group *current_pg;
	struct priority_group *next_pg;	/* Switch to this PG if set */

	atomic_t nr_valid_paths;	/* Total number of usable paths */
	unsigned nr_priority_groups;
	struct list_head priority_groups;

	const char *hw_handler_name;
	char *hw_handler_params;
	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
	unsigned pg_init_retries;	/* Number of times to retry pg_init */
	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
	atomic_t pg_init_count;		/* Number of times pg_init called */

	struct mutex work_mutex;
	struct work_struct trigger_event;
	struct dm_target *ti;

	struct work_struct process_queued_bios;
	struct bio_list queued_bios;

	struct timer_list nopath_timer;	/* Timeout for queue_if_no_path */
};

/*
 * Context information attached to each io we process.
 */
struct dm_mpath_io {
	struct pgpath *pgpath;
	size_t nr_bytes;
};

typedef int (*action_fn) (struct pgpath *pgpath);

static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_or_offline_path(struct pgpath *pgpath);
static void activate_path_work(struct work_struct *work);
static void process_queued_bios(struct work_struct *work);
static void queue_if_no_path_timeout_work(struct timer_list *t);

/*-----------------------------------------------
 * Multipath state flags.
 *-----------------------------------------------*/

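/*
 * These are bit numbers within multipath::flags, manipulated atomically
 * with {test,set,clear,assign}_bit().
 */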
#define MPATHF_QUEUE_IO 0			/* Must we queue all I/O? */
#define MPATHF_QUEUE_IF_NO_PATH 1		/* Queue I/O if last path fails? */
#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2		/* Saved state during suspension */
#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3	/* If there's already a hw_handler present, don't change it. */
#define MPATHF_PG_INIT_DISABLED 4		/* pg_init is not currently allowed */
#define MPATHF_PG_INIT_REQUIRED 5		/* pg_init needs calling? */
#define MPATHF_PG_INIT_DELAY_RETRY 6		/* Delay pg_init retry? */

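/*
 * Test a flag bit locklessly; if it appears set, re-check under m->lock
 * to confirm it is still set before acting on it.
 */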
static bool mpath_double_check_test_bit(int MPATHF_bit, struct multipath *m)
{
	bool r = test_bit(MPATHF_bit, &m->flags);

	if (r) {
		unsigned long flags;

		spin_lock_irqsave(&m->lock, flags);
		r = test_bit(MPATHF_bit, &m->flags);
		spin_unlock_irqrestore(&m->lock, flags);
	}

	return r;
}

/*-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------*/

static struct pgpath *alloc_pgpath(void)
{
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

	if (!pgpath)
		return NULL;

	pgpath->is_active = true;

	return pgpath;
}

static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}

static struct priority_group *alloc_priority_group(void)
{
	struct priority_group *pg;

	pg = kzalloc(sizeof(*pg), GFP_KERNEL);

	if (pg)
		INIT_LIST_HEAD(&pg->pgpaths);

	return pg;
}

static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
	struct pgpath *pgpath, *tmp;

	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
		list_del(&pgpath->list);
		dm_put_device(ti, pgpath->path.dev);
		free_pgpath(pgpath);
	}
}

static void free_priority_group(struct priority_group *pg,
				struct dm_target *ti)
{
	struct path_selector *ps = &pg->ps;

	if (ps->type) {
		ps->type->destroy(ps);
		dm_put_path_selector(ps->type);
	}

	free_pgpaths(&pg->pgpaths, ti);
	kfree(pg);
}

static struct multipath *alloc_multipath(struct dm_target *ti)
{
	struct multipath *m;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (m) {
		INIT_LIST_HEAD(&m->priority_groups);
		spin_lock_init(&m->lock);
		atomic_set(&m->nr_valid_paths, 0);
		INIT_WORK(&m->trigger_event, trigger_event);
		mutex_init(&m->work_mutex);

		m->queue_mode = DM_TYPE_NONE;

		m->ti = ti;
		ti->private = m;

		timer_setup(&m->nopath_timer, queue_if_no_path_timeout_work, 0);
	}

	return m;
}

static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
{
	if (m->queue_mode == DM_TYPE_NONE) {
		m->queue_mode = DM_TYPE_REQUEST_BASED;
	} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
		INIT_WORK(&m->process_queued_bios, process_queued_bios);
		/*
		 * bio-based doesn't support any direct scsi_dh management;
		 * it just discovers if a scsi_dh is attached.
		 */
		set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
	}

	dm_table_set_type(ti->table, m->queue_mode);

	/*
	 * Init fields that are only used when a scsi_dh is attached
	 * - must do this unconditionally (really doesn't hurt non-SCSI uses)
	 */
	set_bit(MPATHF_QUEUE_IO, &m->flags);
	atomic_set(&m->pg_init_in_progress, 0);
	atomic_set(&m->pg_init_count, 0);
	m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
	init_waitqueue_head(&m->pg_init_wait);

	return 0;
}

static void free_multipath(struct multipath *m)
{
	struct priority_group *pg, *tmp;

	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
		list_del(&pg->list);
		free_priority_group(pg, m->ti);
	}

	kfree(m->hw_handler_name);
	kfree(m->hw_handler_params);
	mutex_destroy(&m->work_mutex);
	kfree(m);
}

static struct dm_mpath_io *get_mpio(union map_info *info)
{
	return info->ptr;
}

static size_t multipath_per_bio_data_size(void)
{
	return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
}

static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
{
	return dm_per_bio_data(bio, multipath_per_bio_data_size());
}

static struct dm_bio_details *get_bio_details_from_mpio(struct dm_mpath_io *mpio)
{
	/* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
	void *bio_details = mpio + 1;

	return bio_details;
}

static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p)
{
	struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
	struct dm_bio_details *bio_details = get_bio_details_from_mpio(mpio);

	mpio->nr_bytes = bio->bi_iter.bi_size;
	mpio->pgpath = NULL;
	*mpio_p = mpio;

	dm_bio_record(bio_details, bio);
}

/*-----------------------------------------------
 * Path selection
 *-----------------------------------------------*/

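/*
 * Kick off pg_init on every active path in the current PG; returns the
 * number of path activations now in progress.
 */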
static int __pg_init_all_paths(struct multipath *m)
{
	struct pgpath *pgpath;
	unsigned long pg_init_delay = 0;

	lockdep_assert_held(&m->lock);

	if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
		return 0;

	atomic_inc(&m->pg_init_count);
	clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);

	/* Check here to reset pg_init_required */
	if (!m->current_pg)
		return 0;

	if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
		/* Skip failed paths */
		if (!pgpath->is_active)
			continue;
		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
				       pg_init_delay))
			atomic_inc(&m->pg_init_in_progress);
	}
	return atomic_read(&m->pg_init_in_progress);
}

static int pg_init_all_paths(struct multipath *m)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	ret = __pg_init_all_paths(m);
	spin_unlock_irqrestore(&m->lock, flags);

	return ret;
}

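/*
 * Switch m->current_pg to pg. If a hardware handler is configured, the
 * new PG needs initialising first, so require pg_init and queue I/O
 * until it completes.
 */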
static void __switch_pg(struct multipath *m, struct priority_group *pg)
{
	lockdep_assert_held(&m->lock);

	m->current_pg = pg;

	/* Must we initialise the PG first, and queue I/O till it's ready? */
	if (m->hw_handler_name) {
		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
		set_bit(MPATHF_QUEUE_IO, &m->flags);
	} else {
		clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
		clear_bit(MPATHF_QUEUE_IO, &m->flags);
	}

	atomic_set(&m->pg_init_count, 0);
}

static struct pgpath *choose_path_in_pg(struct multipath *m,
					struct priority_group *pg,
					size_t nr_bytes)
{
	unsigned long flags;
	struct dm_path *path;
	struct pgpath *pgpath;

	path = pg->ps.type->select_path(&pg->ps, nr_bytes);
	if (!path)
		return ERR_PTR(-ENXIO);

	pgpath = path_to_pgpath(path);

	if (unlikely(READ_ONCE(m->current_pg) != pg)) {
		/* Only update current_pgpath if pg changed */
		spin_lock_irqsave(&m->lock, flags);
		m->current_pgpath = pgpath;
		__switch_pg(m, pg);
		spin_unlock_irqrestore(&m->lock, flags);
	}

	return pgpath;
}

static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
{
	unsigned long flags;
	struct priority_group *pg;
	struct pgpath *pgpath;
	unsigned bypassed = 1;

	if (!atomic_read(&m->nr_valid_paths)) {
		spin_lock_irqsave(&m->lock, flags);
		clear_bit(MPATHF_QUEUE_IO, &m->flags);
		spin_unlock_irqrestore(&m->lock, flags);
		goto failed;
	}

	/* Were we instructed to switch PG? */
	if (READ_ONCE(m->next_pg)) {
		spin_lock_irqsave(&m->lock, flags);
		pg = m->next_pg;
		if (!pg) {
			spin_unlock_irqrestore(&m->lock, flags);
			goto check_current_pg;
		}
		m->next_pg = NULL;
		spin_unlock_irqrestore(&m->lock, flags);
		pgpath = choose_path_in_pg(m, pg, nr_bytes);
		if (!IS_ERR_OR_NULL(pgpath))
			return pgpath;
	}

	/* Don't change PG until it has no remaining paths */
check_current_pg:
	pg = READ_ONCE(m->current_pg);
	if (pg) {
		pgpath = choose_path_in_pg(m, pg, nr_bytes);
		if (!IS_ERR_OR_NULL(pgpath))
			return pgpath;
	}

	/*
	 * Loop through priority groups until we find a valid path.
	 * First time we skip PGs marked 'bypassed'.
	 * Second time we only try the ones we skipped, but set
	 * pg_init_delay_retry so we do not hammer controllers.
	 */
	do {
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed == !!bypassed)
				continue;
			pgpath = choose_path_in_pg(m, pg, nr_bytes);
			if (!IS_ERR_OR_NULL(pgpath)) {
				if (!bypassed) {
					spin_lock_irqsave(&m->lock, flags);
					set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
					spin_unlock_irqrestore(&m->lock, flags);
				}
				return pgpath;
			}
		}
	} while (bypassed--);

failed:
	spin_lock_irqsave(&m->lock, flags);
	m->current_pgpath = NULL;
	m->current_pg = NULL;
	spin_unlock_irqrestore(&m->lock, flags);

	return NULL;
}

/*
 * dm_report_EIO() is a macro instead of a function to make pr_debug_ratelimited()
 * report the function name and line number of the function from which
 * it has been invoked.
 */
#define dm_report_EIO(m)						\
do {									\
	DMDEBUG_LIMIT("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d", \
		      dm_table_device_name((m)->ti->table),		\
		      test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags),	\
		      test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
		      dm_noflush_suspending((m)->ti));			\
} while (0)

/*
 * Check whether bios must be queued in the device-mapper core rather
 * than here in the target.
 */
static bool __must_push_back(struct multipath *m)
{
	return dm_noflush_suspending(m->ti);
}

static bool must_push_back_rq(struct multipath *m)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&m->lock, flags);
	ret = (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) || __must_push_back(m));
	spin_unlock_irqrestore(&m->lock, flags);

	return ret;
}

/*
 * Map cloned requests (request-based multipath)
 */
static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
				   union map_info *map_context,
				   struct request **__clone)
{
	struct multipath *m = ti->private;
	size_t nr_bytes = blk_rq_bytes(rq);
	struct pgpath *pgpath;
	struct block_device *bdev;
	struct dm_mpath_io *mpio = get_mpio(map_context);
	struct request_queue *q;
	struct request *clone;

	/* Do we need to select a new pgpath? */
	pgpath = READ_ONCE(m->current_pgpath);
	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
		pgpath = choose_pgpath(m, nr_bytes);

	if (!pgpath) {
		if (must_push_back_rq(m))
			return DM_MAPIO_DELAY_REQUEUE;
		dm_report_EIO(m);	/* Failed */
		return DM_MAPIO_KILL;
	} else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) ||
		   mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) {
		pg_init_all_paths(m);
		return DM_MAPIO_DELAY_REQUEUE;
	}

	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;

	bdev = pgpath->path.dev->bdev;
	q = bdev_get_queue(bdev);
	clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE,
			BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(clone)) {
		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
		if (blk_queue_dying(q)) {
			atomic_inc(&m->pg_init_in_progress);
			activate_or_offline_path(pgpath);
			return DM_MAPIO_DELAY_REQUEUE;
		}

		/*
		 * blk-mq's SCHED_RESTART can cover this requeue, so we
		 * needn't deal with it by DELAY_REQUEUE. More importantly,
		 * we have to return DM_MAPIO_REQUEUE so that blk-mq can
		 * get the queue busy feedback (via BLK_STS_RESOURCE),
		 * otherwise I/O merging can suffer.
		 */
		return DM_MAPIO_REQUEUE;
	}
	clone->bio = clone->biotail = NULL;
	clone->rq_disk = bdev->bd_disk;
	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	*__clone = clone;

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      nr_bytes);
	return DM_MAPIO_REMAPPED;
}

static void multipath_release_clone(struct request *clone,
				    union map_info *map_context)
{
	if (unlikely(map_context)) {
		/*
		 * non-NULL map_context means caller is still map
		 * method; must undo multipath_clone_and_map()
		 */
		struct dm_mpath_io *mpio = get_mpio(map_context);
		struct pgpath *pgpath = mpio->pgpath;

		if (pgpath && pgpath->pg->ps.type->end_io)
			pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
						    &pgpath->path,
						    mpio->nr_bytes,
						    clone->io_start_time_ns);
	}

	blk_put_request(clone);
}

/*
 * Map cloned bios (bio-based multipath)
 */

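/* Caller must hold m->lock. */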
static void __multipath_queue_bio(struct multipath *m, struct bio *bio)
{
	/* Queue for the daemon to resubmit */
	bio_list_add(&m->queued_bios, bio);
	if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
		queue_work(kmultipathd, &m->process_queued_bios);
}

static void multipath_queue_bio(struct multipath *m, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	__multipath_queue_bio(m, bio);
	spin_unlock_irqrestore(&m->lock, flags);
}

static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
{
	struct pgpath *pgpath;
	unsigned long flags;

	/* Do we need to select a new pgpath? */
	pgpath = READ_ONCE(m->current_pgpath);
	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
		pgpath = choose_pgpath(m, bio->bi_iter.bi_size);

	if (!pgpath) {
		spin_lock_irqsave(&m->lock, flags);
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
			__multipath_queue_bio(m, bio);
			pgpath = ERR_PTR(-EAGAIN);
		}
		spin_unlock_irqrestore(&m->lock, flags);

	} else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) ||
		   mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) {
		multipath_queue_bio(m, bio);
		pg_init_all_paths(m);
		return ERR_PTR(-EAGAIN);
	}

	return pgpath;
}

static int __multipath_map_bio(struct multipath *m, struct bio *bio,
			       struct dm_mpath_io *mpio)
{
	struct pgpath *pgpath = __map_bio(m, bio);

	if (IS_ERR(pgpath))
		return DM_MAPIO_SUBMITTED;

	if (!pgpath) {
		if (__must_push_back(m))
			return DM_MAPIO_REQUEUE;
		dm_report_EIO(m);
		return DM_MAPIO_KILL;
	}

	mpio->pgpath = pgpath;

	bio->bi_status = 0;
	bio_set_dev(bio, pgpath->path.dev->bdev);
	bio->bi_opf |= REQ_FAILFAST_TRANSPORT;

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      mpio->nr_bytes);
	return DM_MAPIO_REMAPPED;
}

static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = NULL;

	multipath_init_per_bio_data(bio, &mpio);
	return __multipath_map_bio(m, bio, mpio);
}

static void process_queued_io_list(struct multipath *m)
{
	if (m->queue_mode == DM_TYPE_REQUEST_BASED)
		dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
	else if (m->queue_mode == DM_TYPE_BIO_BASED)
		queue_work(kmultipathd, &m->process_queued_bios);
}

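/*
 * Worker: drain m->queued_bios under the lock, then remap and resubmit
 * (or complete with an error) each bio outside the lock.
 */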
static void process_queued_bios(struct work_struct *work)
{
	int r;
	unsigned long flags;
	struct bio *bio;
	struct bio_list bios;
	struct blk_plug plug;
	struct multipath *m =
		container_of(work, struct multipath, process_queued_bios);

	bio_list_init(&bios);

	spin_lock_irqsave(&m->lock, flags);

	if (bio_list_empty(&m->queued_bios)) {
		spin_unlock_irqrestore(&m->lock, flags);
		return;
	}

	bio_list_merge(&bios, &m->queued_bios);
	bio_list_init(&m->queued_bios);

	spin_unlock_irqrestore(&m->lock, flags);

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&bios))) {
		struct dm_mpath_io *mpio = get_mpio_from_bio(bio);

		dm_bio_restore(get_bio_details_from_mpio(mpio), bio);
		r = __multipath_map_bio(m, bio, mpio);
		switch (r) {
		case DM_MAPIO_KILL:
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		case DM_MAPIO_REQUEUE:
			bio->bi_status = BLK_STS_DM_REQUEUE;
			bio_endio(bio);
			break;
		case DM_MAPIO_REMAPPED:
			submit_bio_noacct(bio);
			break;
		case DM_MAPIO_SUBMITTED:
			break;
		default:
			WARN_ONCE(true, "__multipath_map_bio() returned %d\n", r);
		}
	}
	blk_finish_plug(&plug);
}

/*
 * If we run out of usable paths, should we queue I/O or error it?
 */
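/*
 * Setting save_old_value preserves the current MPATHF_QUEUE_IF_NO_PATH
 * value in MPATHF_SAVED_QUEUE_IF_NO_PATH (e.g. during suspension) so it
 * can be restored later.
 */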
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) bool save_old_value, const char *caller)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) bool queue_if_no_path_bit, saved_queue_if_no_path_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) const char *dm_dev_name = dm_table_device_name(m->ti->table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) DMDEBUG("%s: %s caller=%s queue_if_no_path=%d save_old_value=%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) dm_dev_name, __func__, caller, queue_if_no_path, save_old_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) spin_lock_irqsave(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) queue_if_no_path_bit = test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) saved_queue_if_no_path_bit = test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) if (save_old_value) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) if (unlikely(!queue_if_no_path_bit && saved_queue_if_no_path_bit)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) DMERR("%s: QIFNP disabled but saved as enabled, saving again loses state, not saving!",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) dm_dev_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) } else if (!queue_if_no_path && saved_queue_if_no_path_bit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) /* due to "fail_if_no_path" message, need to honor it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) DMDEBUG("%s: after %s changes; QIFNP = %d; SQIFNP = %d; DNFS = %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) dm_dev_name, __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) dm_noflush_suspending(m->ti));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) spin_unlock_irqrestore(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) if (!queue_if_no_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) dm_table_run_md_queue_async(m->ti->table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) process_queued_io_list(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) * If the queue_if_no_path timeout fires, turn off queue_if_no_path and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) * process any queued I/O.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) static void queue_if_no_path_timeout_work(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) struct multipath *m = from_timer(m, t, nopath_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) DMWARN("queue_if_no_path timeout on %s, failing queued IO",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) dm_table_device_name(m->ti->table));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) queue_if_no_path(m, false, false, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) * Enable the queue_if_no_path timeout if necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) * Called with m->lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) static void enable_nopath_timeout(struct multipath *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) unsigned long queue_if_no_path_timeout =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) READ_ONCE(queue_if_no_path_timeout_secs) * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) lockdep_assert_held(&m->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) if (queue_if_no_path_timeout > 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) atomic_read(&m->nr_valid_paths) == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) mod_timer(&m->nopath_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) jiffies + queue_if_no_path_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
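/* Stop the no-path timer; del_timer_sync() also waits for a running handler. */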
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) static void disable_nopath_timeout(struct multipath *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) del_timer_sync(&m->nopath_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * An event is triggered whenever a path is taken out of use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) * Includes path failure and PG bypass.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) static void trigger_event(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) struct multipath *m =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) container_of(work, struct multipath, trigger_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) dm_table_event(m->ti->table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) /*-----------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) * Constructor/argument parsing:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) * <#multipath feature args> [<arg>]*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) * <#hw_handler args> [hw_handler [<arg>]*]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) * <#priority groups>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * <initial priority group>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) * [<selector> <#selector args> [<arg>]*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) * <#paths> <#per-path selector args>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) * [<path> [<arg>]* ]+ ]+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) *---------------------------------------------------------------*/
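/*
 * For example (illustrative values only), a request-based table with no
 * feature or hardware-handler args and one priority group of two
 * round-robin paths, each serving 1000 I/Os before switching, could be:
 *
 *   0 2097152 multipath 0 0 1 1 round-robin 0 2 1 8:16 1000 8:32 1000
 */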
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) struct dm_target *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) struct path_selector_type *pst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) unsigned ps_argc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) static const struct dm_arg _args[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) {0, 1024, "invalid number of path selector args"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) pst = dm_get_path_selector(dm_shift_arg(as));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (!pst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) ti->error = "unknown path selector type";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) dm_put_path_selector(pst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) r = pst->create(&pg->ps, ps_argc, as->argv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) dm_put_path_selector(pst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) ti->error = "path selector constructor failed";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) pg->ps.type = pst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) dm_consume_args(as, ps_argc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
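/*
 * Attach the desired SCSI device handler to one path's request queue,
 * optionally retaining a handler the SCSI layer has already attached.
 */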
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) const char **attached_handler_name, char **error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) struct request_queue *q = bdev_get_queue(bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) if (mpath_double_check_test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, m)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) retain:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (*attached_handler_name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * Clear any hw_handler_params associated with a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * handler that isn't already attached.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) if (m->hw_handler_name && strcmp(*attached_handler_name, m->hw_handler_name)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) kfree(m->hw_handler_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) m->hw_handler_params = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
			/*
			 * Reset hw_handler_name to match the attached handler.
			 *
			 * NB. This modifies the table line to show the handler
			 * actually in use instead of the one originally requested.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) kfree(m->hw_handler_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) m->hw_handler_name = *attached_handler_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) *attached_handler_name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) if (m->hw_handler_name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) r = scsi_dh_attach(q, m->hw_handler_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) if (r == -EBUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) char b[BDEVNAME_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) bdevname(bdev, b));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) goto retain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (r < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) *error = "error attaching hardware handler";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) if (m->hw_handler_params) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) r = scsi_dh_set_params(q, m->hw_handler_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) if (r < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) *error = "unable to set hardware handler parameters";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
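/*
 * Parse one "<path> [<arg>]*" group: open the device, set up any
 * hardware handler and hand the path over to the path selector.
 */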
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) struct dm_target *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) struct pgpath *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) struct multipath *m = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) struct request_queue *q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) const char *attached_handler_name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) /* we need at least a path arg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (as->argc < 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) ti->error = "no device given";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) p = alloc_pgpath();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) &p->path.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) ti->error = "error getting device";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) q = bdev_get_queue(p->path.dev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (attached_handler_name || m->hw_handler_name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) kfree(attached_handler_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) dm_put_device(ti, p->path.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) dm_put_device(ti, p->path.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) bad:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) free_pgpath(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) return ERR_PTR(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
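/*
 * Parse a single priority group:
 *   <selector> <#selector args> [<arg>]*
 *   <#paths> <#per-path selector args> [<path> [<arg>]*]+
 */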
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) static struct priority_group *parse_priority_group(struct dm_arg_set *as,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) struct multipath *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) static const struct dm_arg _args[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) {1, 1024, "invalid number of paths"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) {0, 1024, "invalid number of selector args"}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) unsigned i, nr_selector_args, nr_args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) struct priority_group *pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) struct dm_target *ti = m->ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) if (as->argc < 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) as->argc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) ti->error = "not enough priority group arguments";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) pg = alloc_priority_group();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (!pg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) ti->error = "couldn't allocate priority group";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) pg->m = m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) r = parse_path_selector(as, pg, ti);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
	/* Read the paths. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) nr_args = 1 + nr_selector_args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) for (i = 0; i < pg->nr_pgpaths; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) struct pgpath *pgpath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) struct dm_arg_set path_args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if (as->argc < nr_args) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) ti->error = "not enough path parameters";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) path_args.argc = nr_args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) path_args.argv = as->argv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) pgpath = parse_path(&path_args, &pg->ps, ti);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (IS_ERR(pgpath)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) r = PTR_ERR(pgpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) pgpath->pg = pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) list_add_tail(&pgpath->list, &pg->pgpaths);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) dm_consume_args(as, nr_args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) return pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) bad:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) free_priority_group(pg, ti);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) return ERR_PTR(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
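/*
 * Parse "<#hw_handler args> [hw_handler [<arg>]*]".  Extra handler
 * arguments are repacked into m->hw_handler_params as NUL-separated
 * strings, preceded by their count, for scsi_dh_set_params().
 */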
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) unsigned hw_argc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) struct dm_target *ti = m->ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) static const struct dm_arg _args[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) {0, 1024, "invalid number of hardware handler args"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (!hw_argc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if (m->queue_mode == DM_TYPE_BIO_BASED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) dm_consume_args(as, hw_argc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) DMERR("bio-based multipath doesn't allow hardware handler args");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) if (!m->hw_handler_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
	if (hw_argc > 1) {
		char *p;
		int i, j, len = 5;	/* room for a 4-digit arg count plus NUL */

		for (i = 0; i <= hw_argc - 2; i++)
			len += strlen(as->argv[i]) + 1;
		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
		if (!p) {
			ti->error = "memory allocation failed";
			ret = -ENOMEM;
			goto fail;
		}
		j = sprintf(p, "%d", hw_argc - 1);
		for (i = 0, p += j + 1; i <= hw_argc - 2; i++, p += j + 1)
			j = sprintf(p, "%s", as->argv[i]);
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) dm_consume_args(as, hw_argc - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) kfree(m->hw_handler_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) m->hw_handler_name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
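/*
 * Parse the feature argument block, e.g. (illustrative values only)
 * "3 queue_if_no_path pg_init_retries 5" or "2 queue_mode bio".
 */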
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) static int parse_features(struct dm_arg_set *as, struct multipath *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) unsigned argc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) struct dm_target *ti = m->ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) const char *arg_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) static const struct dm_arg _args[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) {0, 8, "invalid number of feature args"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) {1, 50, "pg_init_retries must be between 1 and 50"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) r = dm_read_arg_group(_args, as, &argc, &ti->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) if (!argc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) arg_name = dm_shift_arg(as);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) argc--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) if (!strcasecmp(arg_name, "queue_if_no_path")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) r = queue_if_no_path(m, true, false, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) if (!strcasecmp(arg_name, "pg_init_retries") &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) (argc >= 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) argc--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) (argc >= 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) argc--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) if (!strcasecmp(arg_name, "queue_mode") &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) (argc >= 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) const char *queue_mode_name = dm_shift_arg(as);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
			if (!strcasecmp(queue_mode_name, "bio")) {
				m->queue_mode = DM_TYPE_BIO_BASED;
			} else if (!strcasecmp(queue_mode_name, "rq") ||
				   !strcasecmp(queue_mode_name, "mq")) {
				m->queue_mode = DM_TYPE_REQUEST_BASED;
			} else {
				ti->error = "Unknown 'queue_mode' requested";
				r = -EINVAL;
			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) argc--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) ti->error = "Unrecognised multipath feature request";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) } while (argc && !r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
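/*
 * Target constructor: build a multipath device from the table line
 * described above and arm the no-path timeout if one is configured.
 */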
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) /* target arguments */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) static const struct dm_arg _args[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) {0, 1024, "invalid number of priority groups"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) {0, 1024, "invalid initial priority group number"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) struct multipath *m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) struct dm_arg_set as;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) unsigned pg_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) unsigned next_pg_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) as.argc = argc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) as.argv = argv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) m = alloc_multipath(ti);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) if (!m) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) ti->error = "can't allocate multipath";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) r = parse_features(&as, m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) r = alloc_multipath_stage2(ti, m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) r = parse_hw_handler(&as, m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) if ((!m->nr_priority_groups && next_pg_num) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) (m->nr_priority_groups && !next_pg_num)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) ti->error = "invalid initial priority group";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) /* parse the priority groups */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) while (as.argc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) struct priority_group *pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) pg = parse_priority_group(&as, m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) if (IS_ERR(pg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) r = PTR_ERR(pg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) nr_valid_paths += pg->nr_pgpaths;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) atomic_set(&m->nr_valid_paths, nr_valid_paths);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) list_add_tail(&pg->list, &m->priority_groups);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) pg_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) pg->pg_num = pg_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) if (!--next_pg_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) m->next_pg = pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) if (pg_count != m->nr_priority_groups) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) ti->error = "priority group count mismatch";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) spin_lock_irqsave(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) enable_nopath_timeout(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) spin_unlock_irqrestore(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) ti->num_flush_bios = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) ti->num_discard_bios = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) ti->num_write_same_bios = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) ti->num_write_zeroes_bios = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) if (m->queue_mode == DM_TYPE_BIO_BASED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) ti->per_io_data_size = multipath_per_bio_data_size();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) ti->per_io_data_size = sizeof(struct dm_mpath_io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) bad:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) free_multipath(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
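/*
 * Sleep until all outstanding pg_init completions have been processed.
 * The caller must first stop new activations from being scheduled
 * (see MPATHF_PG_INIT_DISABLED in flush_multipath_work()).
 */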
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) static void multipath_wait_for_pg_init_completion(struct multipath *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) DEFINE_WAIT(wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) if (!atomic_read(&m->pg_init_in_progress))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) io_schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) finish_wait(&m->pg_init_wait, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
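/*
 * Quiesce asynchronous multipath work: block further pg_init, wait for
 * in-flight path activations, then flush the queued-bio and
 * trigger-event work items.
 */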
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) static void flush_multipath_work(struct multipath *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) if (m->hw_handler_name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) if (!atomic_read(&m->pg_init_in_progress))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) goto skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) spin_lock_irqsave(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) if (atomic_read(&m->pg_init_in_progress) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) !test_and_set_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) spin_unlock_irqrestore(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) flush_workqueue(kmpath_handlerd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) multipath_wait_for_pg_init_completion(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) spin_lock_irqsave(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) spin_unlock_irqrestore(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) skip:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) if (m->queue_mode == DM_TYPE_BIO_BASED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) flush_work(&m->process_queued_bios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) flush_work(&m->trigger_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) static void multipath_dtr(struct dm_target *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) struct multipath *m = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) disable_nopath_timeout(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) flush_multipath_work(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) free_multipath(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) * Take a path out of use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) static int fail_path(struct pgpath *pgpath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) struct multipath *m = pgpath->pg->m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) spin_lock_irqsave(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) if (!pgpath->is_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) DMWARN("%s: Failing path %s.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) dm_table_device_name(m->ti->table),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) pgpath->path.dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) pgpath->is_active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) pgpath->fail_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) atomic_dec(&m->nr_valid_paths);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) if (pgpath == m->current_pgpath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) m->current_pgpath = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) schedule_work(&m->trigger_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) enable_nopath_timeout(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) spin_unlock_irqrestore(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) * Reinstate a previously-failed path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) static int reinstate_path(struct pgpath *pgpath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) int r = 0, run_queue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) struct multipath *m = pgpath->pg->m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) unsigned nr_valid_paths;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) spin_lock_irqsave(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) if (pgpath->is_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) DMWARN("%s: Reinstating path %s.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) dm_table_device_name(m->ti->table),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) pgpath->path.dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) pgpath->is_active = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) if (nr_valid_paths == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) m->current_pgpath = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) run_queue = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) atomic_inc(&m->pg_init_in_progress);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) pgpath->path.dev->name, nr_valid_paths);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) schedule_work(&m->trigger_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) spin_unlock_irqrestore(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) if (run_queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) dm_table_run_md_queue_async(m->ti->table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) process_queued_io_list(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) if (pgpath->is_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) disable_nopath_timeout(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) * Fail or reinstate all paths that match the provided struct dm_dev.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) static int action_dev(struct multipath *m, struct dm_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) action_fn action)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) int r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) struct pgpath *pgpath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) struct priority_group *pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) list_for_each_entry(pg, &m->priority_groups, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) list_for_each_entry(pgpath, &pg->pgpaths, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) if (pgpath->path.dev == dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) r = action(pgpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
/*
 * Set or clear a PG's temporary "bypassed" state and force path reselection.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) static void bypass_pg(struct multipath *m, struct priority_group *pg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) bool bypassed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) spin_lock_irqsave(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) pg->bypassed = bypassed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) m->current_pgpath = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) m->current_pg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) spin_unlock_irqrestore(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) schedule_work(&m->trigger_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) * Switch to using the specified PG from the next I/O that gets mapped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) static int switch_pg_num(struct multipath *m, const char *pgstr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) struct priority_group *pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) unsigned pgnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) char dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) DMWARN("invalid PG number supplied to switch_pg_num");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) spin_lock_irqsave(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) list_for_each_entry(pg, &m->priority_groups, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) pg->bypassed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) if (--pgnum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) m->current_pgpath = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) m->current_pg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) m->next_pg = pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) spin_unlock_irqrestore(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) schedule_work(&m->trigger_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) * Set/clear bypassed status of a PG.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) * PGs are numbered upwards from 1 in the order they were declared.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) struct priority_group *pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) unsigned pgnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) char dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) DMWARN("invalid PG number supplied to bypass_pg");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) list_for_each_entry(pg, &m->priority_groups, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) if (!--pgnum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) bypass_pg(m, pg, bypassed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) * Should we retry pg_init immediately?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) bool limit_reached = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) spin_lock_irqsave(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) limit_reached = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) spin_unlock_irqrestore(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) return limit_reached;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
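/*
 * Completion callback for scsi_dh_activate().  Acts on the handler's
 * verdict (retry, bypass the PG or fail the path) and, once the last
 * outstanding pg_init finishes, either starts another round or releases
 * the I/O that was queued while path initialization was in progress.
 */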
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) static void pg_init_done(void *data, int errors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) struct pgpath *pgpath = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) struct priority_group *pg = pgpath->pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) struct multipath *m = pg->m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) bool delay_retry = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) /* device or driver problems */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) switch (errors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) case SCSI_DH_OK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) case SCSI_DH_NOSYS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) if (!m->hw_handler_name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) errors = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) }
		DMERR("Could not failover the device: Handler scsi_dh_%s Error %d.",
		      m->hw_handler_name, errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) * Fail path for now, so we do not ping pong
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) fail_path(pgpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) case SCSI_DH_DEV_TEMP_BUSY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) * Probably doing something like FW upgrade on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) * controller so try the other pg.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) bypass_pg(m, pg, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) case SCSI_DH_RETRY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) /* Wait before retrying. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) delay_retry = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) case SCSI_DH_IMM_RETRY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) case SCSI_DH_RES_TEMP_UNAVAIL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) if (pg_init_limit_reached(m, pgpath))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) fail_path(pgpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) errors = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) case SCSI_DH_DEV_OFFLINED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) * We probably do not want to fail the path for a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) * error, but this is what the old dm did. In future
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) * patches we can do more advanced handling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) fail_path(pgpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) spin_lock_irqsave(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) if (errors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) if (pgpath == m->current_pgpath) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) DMERR("Could not failover device. Error %d.", errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) m->current_pgpath = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) m->current_pg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) } else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) pg->bypassed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) if (atomic_dec_return(&m->pg_init_in_progress) > 0)
		/* Activations of other paths are still ongoing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) if (delay_retry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) if (__pg_init_all_paths(m))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) clear_bit(MPATHF_QUEUE_IO, &m->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) process_queued_io_list(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) * Wake up any thread waiting to suspend.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) wake_up(&m->pg_init_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) spin_unlock_irqrestore(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
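/* Start pg_init on a usable path, or complete it as offlined otherwise. */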
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) static void activate_or_offline_path(struct pgpath *pgpath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) if (pgpath->is_active && !blk_queue_dying(q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) scsi_dh_activate(q, pg_init_done, pgpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) static void activate_path_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) struct pgpath *pgpath =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) container_of(work, struct pgpath, activate_path.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) activate_or_offline_path(pgpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) static int multipath_end_io(struct dm_target *ti, struct request *clone,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) blk_status_t error, union map_info *map_context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) struct dm_mpath_io *mpio = get_mpio(map_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) struct pgpath *pgpath = mpio->pgpath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) int r = DM_ENDIO_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) * We don't queue any clone request inside the multipath target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) * during end I/O handling, since those clone requests don't have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) * bio clones. If we queued them inside the multipath target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) * we would need to make bio clones, which requires memory allocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) * don't have bio clones.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) * Instead of queueing the clone request here, we queue the original
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) * request into dm core, which will remake a clone request and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) * clone bios for it and resubmit it later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) if (error && blk_path_error(error)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) struct multipath *m = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) if (error == BLK_STS_RESOURCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) r = DM_ENDIO_DELAY_REQUEUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) r = DM_ENDIO_REQUEUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) if (pgpath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) fail_path(pgpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) if (!atomic_read(&m->nr_valid_paths) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) !must_push_back_rq(m)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) if (error == BLK_STS_IOERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) dm_report_EIO(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) /* complete with the original error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) r = DM_ENDIO_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) if (pgpath) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) struct path_selector *ps = &pgpath->pg->ps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) if (ps->type->end_io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) clone->io_start_time_ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)
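/*
 * Bio-based completion differs from the request-based path above: a failed
 * bio can be queued inside the target itself (multipath_queue_bio) and
 * reported as DM_ENDIO_INCOMPLETE, because bios are cheap to resubmit,
 * whereas clone requests must be pushed back to dm core.
 */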
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) blk_status_t *error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) struct multipath *m = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) struct pgpath *pgpath = mpio->pgpath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) int r = DM_ENDIO_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) if (!*error || !blk_path_error(*error))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) if (pgpath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) fail_path(pgpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) if (!atomic_read(&m->nr_valid_paths)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) spin_lock_irqsave(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) if (__must_push_back(m)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) r = DM_ENDIO_REQUEUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) dm_report_EIO(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) *error = BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) spin_unlock_irqrestore(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) spin_unlock_irqrestore(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) multipath_queue_bio(m, clone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) r = DM_ENDIO_INCOMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) if (pgpath) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) struct path_selector *ps = &pgpath->pg->ps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) if (ps->type->end_io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) dm_start_time_ns_from_clone(clone));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) * Suspend with flush can't complete until all the I/O is processed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) * so if the last path fails we must error any remaining I/O.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) * - Note that if freeze_bdev fails while suspending, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) * queue_if_no_path state is lost - userspace should reset it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) * Otherwise, during a noflush suspend, queue_if_no_path will not change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) static void multipath_presuspend(struct dm_target *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) struct multipath *m = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) /* FIXME: bio-based shouldn't need to always disable queue_if_no_path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) if (m->queue_mode == DM_TYPE_BIO_BASED || !dm_noflush_suspending(m->ti))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) queue_if_no_path(m, false, true, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
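/*
 * Flush all pg_init work and the multipath workqueues so that no path
 * activation or event work is still running once the device is suspended.
 */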
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) static void multipath_postsuspend(struct dm_target *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) struct multipath *m = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) mutex_lock(&m->work_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) flush_multipath_work(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) mutex_unlock(&m->work_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) * Restore the queue_if_no_path setting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) static void multipath_resume(struct dm_target *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) struct multipath *m = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) spin_lock_irqsave(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
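	/* QIFNP/SQIFNP: the (saved) queue_if_no_path flag values */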
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) DMDEBUG("%s: %s finished; QIFNP = %d; SQIFNP = %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) dm_table_device_name(m->ti->table), __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) spin_unlock_irqrestore(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) * Info output has the following format:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) * num_multipath_feature_args [multipath_feature_args]*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) * num_handler_status_args [handler_status_args]*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) * num_groups init_group_number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) * [A|D|E num_ps_status_args [ps_status_args]*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) * num_paths num_selector_args
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) * [path_dev A|F fail_count [selector_args]* ]+ ]+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) * Table output has the following format (identical to the constructor string):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) * num_feature_args [features_args]*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) * num_handler_args hw_handler [hw_handler_args]*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) * num_groups init_group_number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) * [priority selector-name num_ps_args [ps_args]*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) * num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) */
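/*
 * Illustrative STATUSTYPE_INFO line for a single-group map with two paths,
 * no hardware handler and the round-robin selector (field values here are
 * hypothetical):
 *
 *   2 0 0 0 1 1 A 0 2 0 8:16 A 0 8:32 A 0
 *
 * i.e. 2 feature args (queue_io=0, pg_init_count=0), 0 handler status
 * args, 1 group with initial group 1, group state 'A' with 0 selector
 * args, 2 paths with 0 selector args each, then "<dev> A|F <fail_count>"
 * per path.
 */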
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) static void multipath_status(struct dm_target *ti, status_type_t type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) unsigned status_flags, char *result, unsigned maxlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) int sz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) struct multipath *m = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) struct priority_group *pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) struct pgpath *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) unsigned pg_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) char state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) spin_lock_irqsave(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) /* Features */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) if (type == STATUSTYPE_INFO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) atomic_read(&m->pg_init_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) else {
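		/*
		 * Feature-arg count: flag-style features contribute one
		 * token each; value-carrying features (pg_init_retries,
		 * pg_init_delay_msecs, queue_mode) contribute two.
		 */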
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) (m->pg_init_retries > 0) * 2 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) DMEMIT("queue_if_no_path ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) if (m->pg_init_retries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) DMEMIT("pg_init_retries %u ", m->pg_init_retries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) DMEMIT("retain_attached_hw_handler ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) switch (m->queue_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) case DM_TYPE_BIO_BASED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) DMEMIT("queue_mode bio ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) WARN_ON_ONCE(true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) if (!m->hw_handler_name || type == STATUSTYPE_INFO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) DMEMIT("0 ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) DMEMIT("1 %s ", m->hw_handler_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) DMEMIT("%u ", m->nr_priority_groups);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) if (m->next_pg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) pg_num = m->next_pg->pg_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) else if (m->current_pg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) pg_num = m->current_pg->pg_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) pg_num = (m->nr_priority_groups ? 1 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) DMEMIT("%u ", pg_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) case STATUSTYPE_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) list_for_each_entry(pg, &m->priority_groups, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) if (pg->bypassed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) state = 'D'; /* Disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) else if (pg == m->current_pg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) state = 'A'; /* Currently Active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) state = 'E'; /* Enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) DMEMIT("%c ", state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) if (pg->ps.type->status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) sz += pg->ps.type->status(&pg->ps, NULL, type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) result + sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) maxlen - sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) DMEMIT("0 ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) DMEMIT("%u %u ", pg->nr_pgpaths,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) pg->ps.type->info_args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) list_for_each_entry(p, &pg->pgpaths, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) DMEMIT("%s %s %u ", p->path.dev->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) p->is_active ? "A" : "F",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) p->fail_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) if (pg->ps.type->status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) sz += pg->ps.type->status(&pg->ps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) &p->path, type, result + sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) maxlen - sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) case STATUSTYPE_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) list_for_each_entry(pg, &m->priority_groups, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) DMEMIT("%s ", pg->ps.type->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) if (pg->ps.type->status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) sz += pg->ps.type->status(&pg->ps, NULL, type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) result + sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) maxlen - sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) DMEMIT("0 ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) DMEMIT("%u %u ", pg->nr_pgpaths,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) pg->ps.type->table_args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) list_for_each_entry(p, &pg->pgpaths, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) DMEMIT("%s ", p->path.dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) if (pg->ps.type->status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) sz += pg->ps.type->status(&pg->ps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) &p->path, type, result + sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) maxlen - sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) spin_unlock_irqrestore(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
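/*
 * Message interface, driven from userspace via the DM_TARGET_MSG ioctl,
 * e.g. dmsetup message <map-name> 0 "fail_path 8:32".  Supported messages:
 *
 *   queue_if_no_path | fail_if_no_path
 *   disable_group <#> | enable_group <#> | switch_group <#>
 *   reinstate_path <dev> | fail_path <dev>
 */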
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) static int multipath_message(struct dm_target *ti, unsigned argc, char **argv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) char *result, unsigned maxlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) int r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) struct dm_dev *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) struct multipath *m = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) action_fn action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) mutex_lock(&m->work_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) if (dm_suspended(ti)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) r = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) if (argc == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) if (!strcasecmp(argv[0], "queue_if_no_path")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) r = queue_if_no_path(m, true, false, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) spin_lock_irqsave(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) enable_nopath_timeout(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) spin_unlock_irqrestore(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) } else if (!strcasecmp(argv[0], "fail_if_no_path")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) r = queue_if_no_path(m, false, false, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) disable_nopath_timeout(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) if (argc != 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %u.", argc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) if (!strcasecmp(argv[0], "disable_group")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) r = bypass_pg_num(m, argv[1], true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) } else if (!strcasecmp(argv[0], "enable_group")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) r = bypass_pg_num(m, argv[1], false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) } else if (!strcasecmp(argv[0], "switch_group")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) r = switch_pg_num(m, argv[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) } else if (!strcasecmp(argv[0], "reinstate_path"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) action = reinstate_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) else if (!strcasecmp(argv[0], "fail_path"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) action = fail_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) DMWARN("Unrecognised multipath message received: %s", argv[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) DMWARN("message: error getting device %s", argv[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) r = action_dev(m, dev, action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) dm_put_device(ti, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) mutex_unlock(&m->work_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) static int multipath_prepare_ioctl(struct dm_target *ti,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) struct block_device **bdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) struct multipath *m = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) struct pgpath *pgpath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) pgpath = READ_ONCE(m->current_pgpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) pgpath = choose_pgpath(m, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) if (pgpath) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) if (!mpath_double_check_test_bit(MPATHF_QUEUE_IO, m)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) *bdev = pgpath->path.dev->bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) /* pg_init has not started or completed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) r = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) /* No path is available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) r = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) spin_lock_irqsave(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) r = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) spin_unlock_irqrestore(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009)
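	/*
	 * -ENOTCONN makes dm core retry the ioctl later; give path
	 * initialization a nudge first so that a retry can succeed.
	 */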
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) if (r == -ENOTCONN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) if (!READ_ONCE(m->current_pg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) /* Path status changed, redo selection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) (void) choose_pgpath(m, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) spin_lock_irqsave(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) (void) __pg_init_all_paths(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) spin_unlock_irqrestore(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) dm_table_run_md_queue_async(m->ti->table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) process_queued_io_list(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) * Only pass ioctls through if the device sizes match exactly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
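/*
 * Report every underlying path device to dm core; used when validating
 * the table and stacking queue limits across all paths.
 */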
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) static int multipath_iterate_devices(struct dm_target *ti,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) iterate_devices_callout_fn fn, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) struct multipath *m = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) struct priority_group *pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) struct pgpath *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) list_for_each_entry(pg, &m->priority_groups, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) list_for_each_entry(p, &pg->pgpaths, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) static int pgpath_busy(struct pgpath *pgpath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) return blk_lld_busy(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) * We return "busy" only when we can map I/Os but the underlying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) * devices are busy (so even if we map I/Os now, the I/Os will wait on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) * the underlying queue).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) * In other words, if we want to kill I/Os or queue them inside us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) * due to map unavailability, we don't return "busy". Otherwise,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) * dm core won't give us the I/Os and we can't do what we want.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) static int multipath_busy(struct dm_target *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) bool busy = false, has_active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) struct multipath *m = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) struct priority_group *pg, *next_pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) struct pgpath *pgpath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) /* pg_init in progress */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) if (atomic_read(&m->pg_init_in_progress))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) /* no paths available, for blk-mq: rely on IO mapping to delay requeue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) if (!atomic_read(&m->nr_valid_paths)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) spin_lock_irqsave(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) spin_unlock_irqrestore(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) return (m->queue_mode != DM_TYPE_REQUEST_BASED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) spin_unlock_irqrestore(&m->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) /* Guess which priority_group will be used at next mapping time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) pg = READ_ONCE(m->current_pg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) next_pg = READ_ONCE(m->next_pg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) pg = next_pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) if (!pg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) * We don't know which pg will be used at the next mapping time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) * We don't call choose_pgpath() here to avoid triggering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) * pg_init just from a busy check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) * So we don't know whether the underlying devices we would be using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) * at the next mapping time are busy or not. Just try mapping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) return busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) * If there is at least one non-busy active path, the path selector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) * will be able to select it, so we consider such a pg not busy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) busy = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) list_for_each_entry(pgpath, &pg->pgpaths, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) if (pgpath->is_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) has_active = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) if (!pgpath_busy(pgpath)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) busy = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) if (!has_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) * No active path in this pg, so this pg won't be used and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) * current_pg will be changed at the next mapping time. Report
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) * "not busy" so that a mapping attempt selects a new pg.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) busy = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) return busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) /*-----------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) * Module setup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) *---------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) static struct target_type multipath_target = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) .name = "multipath",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) .version = {1, 14, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) DM_TARGET_PASSES_INTEGRITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) .module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) .ctr = multipath_ctr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) .dtr = multipath_dtr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) .clone_and_map_rq = multipath_clone_and_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) .release_clone_rq = multipath_release_clone,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) .rq_end_io = multipath_end_io,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) .map = multipath_map_bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) .end_io = multipath_end_io_bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) .presuspend = multipath_presuspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) .postsuspend = multipath_postsuspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) .resume = multipath_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) .status = multipath_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) .message = multipath_message,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) .prepare_ioctl = multipath_prepare_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) .iterate_devices = multipath_iterate_devices,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) .busy = multipath_busy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) static int __init dm_multipath_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) if (!kmultipathd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) DMERR("failed to create workqueue kmpathd");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) goto bad_alloc_kmultipathd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) * A separate workqueue is used to handle the device handlers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) * to avoid overloading the existing workqueue. Overloading it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) * would also create a bottleneck in the path of the storage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) * hardware device activation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) WQ_MEM_RECLAIM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) if (!kmpath_handlerd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) DMERR("failed to create workqueue kmpath_handlerd");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) goto bad_alloc_kmpath_handlerd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) r = dm_register_target(&multipath_target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) if (r < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) DMERR("request-based register failed %d", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) goto bad_register_target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) bad_register_target:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) destroy_workqueue(kmpath_handlerd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) bad_alloc_kmpath_handlerd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) destroy_workqueue(kmultipathd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) bad_alloc_kmultipathd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) static void __exit dm_multipath_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) destroy_workqueue(kmpath_handlerd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) destroy_workqueue(kmultipathd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) dm_unregister_target(&multipath_target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) module_init(dm_multipath_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) module_exit(dm_multipath_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) module_param_named(queue_if_no_path_timeout_secs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) queue_if_no_path_timeout_secs, ulong, S_IRUGO | S_IWUSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) MODULE_PARM_DESC(queue_if_no_path_timeout_secs, "No available paths queue IO timeout in seconds");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) MODULE_DESCRIPTION(DM_NAME " multipath target");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) MODULE_LICENSE("GPL");