// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Device Mapper Uevent Support (dm-uevent)
 *
 * Copyright IBM Corporation, 2007
 * Author: Mike Anderson <andmike@linux.vnet.ibm.com>
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/kobject.h>
#include <linux/dm-ioctl.h>
#include <linux/export.h>

#include "dm.h"
#include "dm-uevent.h"

#define DM_MSG_PREFIX "uevent"

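/*
 * Table mapping each dm uevent type to the kobject action it is sent
 * with and to the DM_ACTION string that appears in the uevent
 * environment.
 */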
static const struct {
	enum dm_uevent_type type;
	enum kobject_action action;
	char *name;
} _dm_uevent_type_names[] = {
	{DM_UEVENT_PATH_FAILED, KOBJ_CHANGE, "PATH_FAILED"},
	{DM_UEVENT_PATH_REINSTATED, KOBJ_CHANGE, "PATH_REINSTATED"},
};

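/* Slab cache backing all struct dm_uevent allocations. */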
static struct kmem_cache *_dm_event_cache;

struct dm_uevent {
	struct mapped_device *md;
	enum kobject_action action;
	struct kobj_uevent_env ku_env;
	struct list_head elist;
	char name[DM_NAME_LEN];
	char uuid[DM_UUID_LEN];
};

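/* Return an event to the slab cache. */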
static void dm_uevent_free(struct dm_uevent *event)
{
	kmem_cache_free(_dm_event_cache, event);
}

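/*
 * Allocate and initialize an event for @md. GFP_ATOMIC is used because
 * path events may be raised from contexts that cannot sleep (e.g. under
 * a spinlock in a path failure handler).
 */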
static struct dm_uevent *dm_uevent_alloc(struct mapped_device *md)
{
	struct dm_uevent *event;

	event = kmem_cache_zalloc(_dm_event_cache, GFP_ATOMIC);
	if (!event)
		return NULL;

	INIT_LIST_HEAD(&event->elist);
	event->md = md;

	return event;
}

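/*
 * Build a path event: allocate it and fill its uevent environment with
 * the DM_TARGET, DM_ACTION, DM_SEQNUM, DM_PATH and DM_NR_VALID_PATHS
 * variables. DM_NAME and DM_UUID are added later, at send time, by
 * dm_send_uevents().
 */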
static struct dm_uevent *dm_build_path_uevent(struct mapped_device *md,
					      struct dm_target *ti,
					      enum kobject_action action,
					      const char *dm_action,
					      const char *path,
					      unsigned int nr_valid_paths)
{
	struct dm_uevent *event;

	event = dm_uevent_alloc(md);
	if (!event) {
		DMERR("%s: dm_uevent_alloc() failed", __func__);
		goto err_nomem;
	}

	event->action = action;

	if (add_uevent_var(&event->ku_env, "DM_TARGET=%s", ti->type->name)) {
		DMERR("%s: add_uevent_var() for DM_TARGET failed",
		      __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_ACTION=%s", dm_action)) {
		DMERR("%s: add_uevent_var() for DM_ACTION failed",
		      __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_SEQNUM=%u",
			   dm_next_uevent_seq(md))) {
		DMERR("%s: add_uevent_var() for DM_SEQNUM failed",
		      __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_PATH=%s", path)) {
		DMERR("%s: add_uevent_var() for DM_PATH failed", __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_NR_VALID_PATHS=%u",
			   nr_valid_paths)) {
		DMERR("%s: add_uevent_var() for DM_NR_VALID_PATHS failed",
		      __func__);
		goto err_add;
	}

	return event;

err_add:
	dm_uevent_free(event);
err_nomem:
	return ERR_PTR(-ENOMEM);
}

/**
 * dm_send_uevents - send uevents for the given list
 *
 * @events: list of events to send; every event is removed from the
 *          list and freed, whether or not it was sent successfully
 * @kobj: kobject generating the events
 */
void dm_send_uevents(struct list_head *events, struct kobject *kobj)
{
	int r;
	struct dm_uevent *event, *next;

	list_for_each_entry_safe(event, next, events, elist) {
		list_del_init(&event->elist);

		/*
		 * When a device is being removed, this copy fails and we
		 * discard these unsent events.
		 */
		if (dm_copy_name_and_uuid(event->md, event->name,
					  event->uuid)) {
			DMINFO("%s: skipping sending uevent for lost device",
			       __func__);
			goto uevent_free;
		}

		if (add_uevent_var(&event->ku_env, "DM_NAME=%s", event->name)) {
			DMERR("%s: add_uevent_var() for DM_NAME failed",
			      __func__);
			goto uevent_free;
		}

		if (add_uevent_var(&event->ku_env, "DM_UUID=%s", event->uuid)) {
			DMERR("%s: add_uevent_var() for DM_UUID failed",
			      __func__);
			goto uevent_free;
		}

		r = kobject_uevent_env(kobj, event->action, event->ku_env.envp);
		if (r)
			DMERR("%s: kobject_uevent_env failed", __func__);
uevent_free:
		dm_uevent_free(event);
	}
}
EXPORT_SYMBOL_GPL(dm_send_uevents);

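/*
 * A minimal sketch of the expected caller, modelled on dm core's
 * event_callback() in drivers/md/dm.c (the field and function names
 * shown are assumptions for illustration): queued events are spliced
 * off the device's list under its lock, then sent against the
 * gendisk's kobject.
 *
 *	static void event_callback(void *context)
 *	{
 *		struct mapped_device *md = context;
 *		unsigned long flags;
 *		LIST_HEAD(uevents);
 *
 *		spin_lock_irqsave(&md->uevent_lock, flags);
 *		list_splice_init(&md->uevent_list, &uevents);
 *		spin_unlock_irqrestore(&md->uevent_lock, flags);
 *
 *		dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
 *	}
 */
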
/**
 * dm_path_uevent - called to create a new path event and queue it
 *
 * @event_type: path event type enum
 * @ti: pointer to a dm_target
 * @path: string containing pathname
 * @nr_valid_paths: number of valid paths remaining
 *
 * The event is queued on the mapped device's uevent list and sent
 * later via dm_send_uevents().
 */
void dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti,
		    const char *path, unsigned int nr_valid_paths)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	struct dm_uevent *event;

	if (event_type >= ARRAY_SIZE(_dm_uevent_type_names)) {
		DMERR("%s: Invalid event_type %d", __func__, event_type);
		return;
	}

	event = dm_build_path_uevent(md, ti,
				     _dm_uevent_type_names[event_type].action,
				     _dm_uevent_type_names[event_type].name,
				     path, nr_valid_paths);
	if (IS_ERR(event))
		return;

	dm_uevent_add(md, &event->elist);
}
EXPORT_SYMBOL_GPL(dm_path_uevent);

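/*
 * A typical caller is dm-mpath when a path fails or is reinstated; a
 * hedged sketch (names follow drivers/md/dm-mpath.c but are
 * assumptions here):
 *
 *	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
 *		       pgpath->path.dev->name,
 *		       atomic_read(&m->nr_valid_paths));
 */

/* Called once from dm core's initialization to create the event cache. */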
int dm_uevent_init(void)
{
	_dm_event_cache = KMEM_CACHE(dm_uevent, 0);
	if (!_dm_event_cache)
		return -ENOMEM;

	DMINFO("version 1.0.3");

	return 0;
}

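/* Tear down the event cache when dm core exits. */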
void dm_uevent_exit(void)
{
	kmem_cache_destroy(_dm_event_cache);
}