// SPDX-License-Identifier: GPL-2.0+
/*
 * ipmi_msghandler.c
 *
 * Incoming and outgoing message routing for an IPMI interface.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 */

#define pr_fmt(fmt) "%s" fmt, "IPMI message handler: "
#define dev_fmt pr_fmt

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/uuid.h>
#include <linux/nospec.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>

#define IPMI_DRIVER_VERSION "39.2"

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
static void smi_recv_tasklet(struct tasklet_struct *t);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
			       struct ipmi_smi_msg *msg);

static bool initialized;
static bool drvregistered;

enum ipmi_panic_event_op {
	IPMI_SEND_PANIC_EVENT_NONE,
	IPMI_SEND_PANIC_EVENT,
	IPMI_SEND_PANIC_EVENT_STRING
};
#ifdef CONFIG_IPMI_PANIC_STRING
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
#elif defined(CONFIG_IPMI_PANIC_EVENT)
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
#else
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
#endif

static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;

static int panic_op_write_handler(const char *val,
				  const struct kernel_param *kp)
{
	char valcp[16];
	char *s;

	strncpy(valcp, val, 15);
	valcp[15] = '\0';

	s = strstrip(valcp);

	if (strcmp(s, "none") == 0)
		ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_NONE;
	else if (strcmp(s, "event") == 0)
		ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT;
	else if (strcmp(s, "string") == 0)
		ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_STRING;
	else
		return -EINVAL;

	return 0;
}

static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
{
	switch (ipmi_send_panic_event) {
	case IPMI_SEND_PANIC_EVENT_NONE:
		strcpy(buffer, "none\n");
		break;

	case IPMI_SEND_PANIC_EVENT:
		strcpy(buffer, "event\n");
		break;

	case IPMI_SEND_PANIC_EVENT_STRING:
		strcpy(buffer, "string\n");
		break;

	default:
		strcpy(buffer, "???\n");
		break;
	}

	return strlen(buffer);
}

static const struct kernel_param_ops panic_op_ops = {
	.set = panic_op_write_handler,
	.get = panic_op_read_handler
};
module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic. Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
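
/*
 * Illustrative usage (not part of the driver itself): when the handler
 * is built as the ipmi_msghandler module, the panic_op setting can
 * typically be changed at runtime through sysfs or given at load time,
 * e.g.:
 *
 *   echo string > /sys/module/ipmi_msghandler/parameters/panic_op
 *   modprobe ipmi_msghandler panic_op=event
 *
 * Values other than "none", "event", and "string" are rejected with
 * -EINVAL by panic_op_write_handler() above.
 */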


#define MAX_EVENTS_IN_QUEUE 25

/* Remain in auto-maintenance mode for this amount of time (in ms). */
static unsigned long maintenance_mode_timeout_ms = 30000;
module_param(maintenance_mode_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(maintenance_mode_timeout_ms,
		 "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");

/*
 * Don't let a message sit in a queue forever; always time it out with at
 * least the max message timer. This is in milliseconds.
 */
#define MAX_MSG_TIMEOUT		60000

/*
 * Timeout times below are in milliseconds, and are done off a 1
 * second timer. So setting the value to 1000 would mean anything
 * between 0 and 1000ms. So really the only reasonable minimum
 * setting is 2000ms, which is between 1 and 2 seconds.
 */

/* The default timeout for message retries. */
static unsigned long default_retry_ms = 2000;
module_param(default_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_retry_ms,
		 "The time (milliseconds) between retry sends");

/* The default timeout for maintenance mode message retries. */
static unsigned long default_maintenance_retry_ms = 3000;
module_param(default_maintenance_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_maintenance_retry_ms,
		 "The time (milliseconds) between retry sends in maintenance mode");

/* The default maximum number of retries */
static unsigned int default_max_retries = 4;
module_param(default_max_retries, uint, 0644);
MODULE_PARM_DESC(default_max_retries,
		 "The default maximum number of times a message is retried before it is given up on");

/* Call every ~1000 ms. */
#define IPMI_TIMEOUT_TIME	1000

/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)
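
/*
 * Illustrative arithmetic (assuming a kernel configured with HZ=250):
 * IPMI_TIMEOUT_JIFFIES = (1000 * 250) / 1000 = 250 jiffies, so the
 * periodic timer fires roughly once per second regardless of the
 * configured tick rate.
 */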

/*
 * Request events from the queue every second (this is the number of
 * IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
 * future, IPMI will add a way to know immediately if an event is in
 * the queue and this silliness can go away.
 */
#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))

/* How long should we cache dynamic device IDs? */
#define IPMI_DYN_DEV_ID_EXPIRY	(10 * HZ)

/*
 * The main "user" data structure.
 */
struct ipmi_user {
	struct list_head link;

	/*
	 * Set to NULL when the user is destroyed, a pointer to myself
	 * so srcu_dereference can be used on it.
	 */
	struct ipmi_user *self;
	struct srcu_struct release_barrier;

	struct kref refcount;

	/* The upper layer that handles receive messages. */
	const struct ipmi_user_hndl *handler;
	void *handler_data;

	/* The interface this user is bound to. */
	struct ipmi_smi *intf;

	/* Does this interface receive IPMI events? */
	bool gets_events;

	/* Free must run in process context for RCU cleanup. */
	struct work_struct remove_work;
};

static struct workqueue_struct *remove_work_wq;

static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
	__acquires(user->release_barrier)
{
	struct ipmi_user *ruser;

	*index = srcu_read_lock(&user->release_barrier);
	ruser = srcu_dereference(user->self, &user->release_barrier);
	if (!ruser)
		srcu_read_unlock(&user->release_barrier, *index);
	return ruser;
}

static void release_ipmi_user(struct ipmi_user *user, int index)
{
	srcu_read_unlock(&user->release_barrier, index);
}
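
/*
 * Typical caller pattern for the two helpers above (a sketch only,
 * error value is illustrative):
 *
 *   int index;
 *
 *   user = acquire_ipmi_user(user, &index);
 *   if (!user)
 *           return -ENODEV;
 *   ... use the user under SRCU protection ...
 *   release_ipmi_user(user, index);
 *
 * Note that acquire_ipmi_user() drops the SRCU read lock itself when
 * the user has been destroyed, so release_ipmi_user() is only called
 * on the success path.
 */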

struct cmd_rcvr {
	struct list_head link;

	struct ipmi_user *user;
	unsigned char netfn;
	unsigned char cmd;
	unsigned int chans;

	/*
	 * This is used to form a linked list during mass deletion.
	 * Since this is in an RCU list, we cannot use the link above
	 * or change any data until the RCU period completes. So we
	 * use this next variable during mass deletion so we can have
	 * a list and don't have to wait and restart the search on
	 * every individual deletion of a command.
	 */
	struct cmd_rcvr *next;
};

struct seq_table {
	unsigned int inuse : 1;
	unsigned int broadcast : 1;

	unsigned long timeout;
	unsigned long orig_timeout;
	unsigned int retries_left;

	/*
	 * To verify on an incoming send message response that this is
	 * the message that the response is for, we keep a sequence id
	 * and increment it every time we send a message.
	 */
	long seqid;

	/*
	 * This is held so we can properly respond to the message on a
	 * timeout, and it is used to hold the temporary data for
	 * retransmission, too.
	 */
	struct ipmi_recv_msg *recv_msg;
};

/*
 * Store the information in a msgid (long) to allow us to find a
 * sequence table entry from the msgid.
 */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
	do {								\
		seq = (((msgid) >> 26) & 0x3f);				\
		seqid = ((msgid) & 0x3ffffff);				\
	} while (0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
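
/*
 * Worked example of the msgid layout above (illustrative values):
 * with seq = 5 and seqid = 0x123456, STORE_SEQ_IN_MSGID() yields
 * (5 << 26) | 0x123456 = 0x14123456, and GET_SEQ_FROM_MSGID() on that
 * value recovers seq = 5 and seqid = 0x123456.  The sequence number
 * fits in 6 bits (0-63, matching IPMI_IPMB_NUM_SEQ below) and the
 * generation counter in the low 26 bits.
 */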

#define IPMI_MAX_CHANNELS 16
struct ipmi_channel {
	unsigned char medium;
	unsigned char protocol;
};

struct ipmi_channel_set {
	struct ipmi_channel c[IPMI_MAX_CHANNELS];
};

struct ipmi_my_addrinfo {
	/*
	 * My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
	 * but may be changed by the user.
	 */
	unsigned char address;

	/*
	 * My LUN. This should generally stay the SMS LUN, but just in
	 * case...
	 */
	unsigned char lun;
};

/*
 * Note that the product id, manufacturer id, guid, and device id are
 * immutable in this structure, so dyn_mutex is not required for
 * accessing those. If those change on a BMC, a new BMC is allocated.
 */
struct bmc_device {
	struct platform_device pdev;
	struct list_head intfs; /* Interfaces on this BMC. */
	struct ipmi_device_id id;
	struct ipmi_device_id fetch_id;
	int dyn_id_set;
	unsigned long dyn_id_expiry;
	struct mutex dyn_mutex; /* Protects id, intfs, & dyn* */
	guid_t guid;
	guid_t fetch_guid;
	int dyn_guid_set;
	struct kref usecount;
	struct work_struct remove_work;
	unsigned char cc; /* completion code */
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
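
/*
 * Illustration of the helper above (not additional driver code):
 * given a struct device *dev that is known to be the pdev.dev member
 * embedded in a struct bmc_device, to_bmc_device(dev) uses
 * container_of() to recover the enclosing bmc_device, e.g.:
 *
 *   struct bmc_device *bmc = to_bmc_device(dev);
 *
 * This is the usual pattern in sysfs attribute callbacks for the BMC
 * device.
 */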

static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
			     struct ipmi_device_id *id,
			     bool *guid_set, guid_t *guid);

/*
 * Various statistics for IPMI, these index stats[] in the ipmi_smi
 * structure.
 */
enum ipmi_stat_indexes {
	/* Commands we got from the user that were invalid. */
	IPMI_STAT_sent_invalid_commands = 0,

	/* Commands we sent to the MC. */
	IPMI_STAT_sent_local_commands,

	/* Responses from the MC that were delivered to a user. */
	IPMI_STAT_handled_local_responses,

	/* Responses from the MC that were not delivered to a user. */
	IPMI_STAT_unhandled_local_responses,

	/* Commands we sent out to the IPMB bus. */
	IPMI_STAT_sent_ipmb_commands,

	/* Commands sent on the IPMB that had errors on the SEND CMD */
	IPMI_STAT_sent_ipmb_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_ipmb_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_ipmb_commands,

	/*
	 * This is like above, but for broadcasts. Broadcasts are
	 * *not* included in the above count (they are expected to
	 * time out).
	 */
	IPMI_STAT_timed_out_ipmb_broadcasts,

	/* Responses I have sent to the IPMB bus. */
	IPMI_STAT_sent_ipmb_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_ipmb_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_ipmb_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_ipmb_responses,

	/* Commands we sent out over the LAN. */
	IPMI_STAT_sent_lan_commands,

	/* Commands sent over the LAN that had errors on the SEND CMD */
	IPMI_STAT_sent_lan_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_lan_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_lan_commands,

	/* Responses I have sent over the LAN. */
	IPMI_STAT_sent_lan_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_lan_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_lan_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_lan_responses,

	/* The command was delivered to the user. */
	IPMI_STAT_handled_commands,

	/* The command had invalid data in it. */
	IPMI_STAT_invalid_commands,

	/* The command didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_commands,

	/* Invalid data in an event. */
	IPMI_STAT_invalid_events,

	/* Events that were received with the proper format. */
	IPMI_STAT_events,

	/* Retransmissions on IPMB that failed. */
	IPMI_STAT_dropped_rexmit_ipmb_commands,

	/* Retransmissions on LAN that failed. */
	IPMI_STAT_dropped_rexmit_lan_commands,

	/* This *must* remain last, add new values above this. */
	IPMI_NUM_STATS
};


#define IPMI_IPMB_NUM_SEQ	64
struct ipmi_smi {
	struct module *owner;

	/* What interface number are we? */
	int intf_num;

	struct kref refcount;

	/* Set when the interface is being unregistered. */
	bool in_shutdown;

	/* Used for a list of interfaces. */
	struct list_head link;

	/*
	 * The list of upper layers that are using me. seq_lock write
	 * protects this. Read protection is with srcu.
	 */
	struct list_head users;
	struct srcu_struct users_srcu;

	/* Used for wake ups at startup. */
	wait_queue_head_t waitq;

	/*
	 * Prevents the interface from being unregistered when the
	 * interface is used by being looked up through the BMC
	 * structure.
	 */
	struct mutex bmc_reg_mutex;

	struct bmc_device tmp_bmc;
	struct bmc_device *bmc;
	bool bmc_registered;
	struct list_head bmc_link;
	char *my_dev_name;
	bool in_bmc_register; /* Handle recursive situations. Yuck. */
	struct work_struct bmc_reg_work;

	const struct ipmi_smi_handlers *handlers;
	void *send_info;

	/* Driver-model device for the system interface. */
	struct device *si_dev;

	/*
	 * A table of sequence numbers for this interface. We use the
	 * sequence numbers for IPMB messages that go out of the
	 * interface to match them up with their responses. A routine
	 * is called periodically to time the items in this list.
	 */
	spinlock_t seq_lock;
	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
	int curr_seq;

	/*
	 * Messages queued for delivery. If delivery fails (out of memory
	 * for instance), they will stay in here to be processed later in a
	 * periodic timer interrupt. The tasklet is for handling received
	 * messages directly from the handler.
	 */
	spinlock_t waiting_rcv_msgs_lock;
	struct list_head waiting_rcv_msgs;
	atomic_t watchdog_pretimeouts_to_deliver;
	struct tasklet_struct recv_tasklet;

	spinlock_t xmit_msgs_lock;
	struct list_head xmit_msgs;
	struct ipmi_smi_msg *curr_msg;
	struct list_head hp_xmit_msgs;

	/*
	 * The list of command receivers that are registered for commands
	 * on this interface.
	 */
	struct mutex cmd_rcvrs_mutex;
	struct list_head cmd_rcvrs;

	/*
	 * Events that were queued because no one was there to receive
	 * them.
	 */
	spinlock_t events_lock; /* For dealing with event stuff. */
	struct list_head waiting_events;
	unsigned int waiting_events_count; /* How many events in queue? */
	char delivering_events;
	char event_msg_printed;

	/* How many users are waiting for events? */
	atomic_t event_waiters;
	unsigned int ticks_to_req_ev;

	spinlock_t watch_lock; /* For dealing with watch stuff below. */

	/* How many users are waiting for commands? */
	unsigned int command_waiters;

	/* How many users are waiting for watchdogs? */
	unsigned int watchdog_waiters;

	/* How many users are waiting for message responses? */
	unsigned int response_waiters;

	/*
	 * Tells what the lower layer has last been asked to watch for,
	 * messages and/or watchdogs. Protected by watch_lock.
	 */
	unsigned int last_watch_mask;

	/*
	 * The event receiver for my BMC, only really used at panic
	 * shutdown as a place to store this.
	 */
	unsigned char event_receiver;
	unsigned char event_receiver_lun;
	unsigned char local_sel_device;
	unsigned char local_event_generator;

	/* For handling of maintenance mode. */
	int maintenance_mode;
	bool maintenance_mode_enable;
	int auto_maintenance_timeout;
	spinlock_t maintenance_mode_lock; /* Used in a timer... */

	/*
	 * If we are doing maintenance on something on IPMB, extend
	 * the timeout time to avoid timeouts writing firmware and
	 * such.
	 */
	int ipmb_maintenance_mode_timeout;

	/*
	 * A cheap hack, if this is non-null and a message to an
	 * interface comes in with a NULL user, call this routine with
	 * it. Note that the message will still be freed by the
	 * caller. This only works on the system interface.
	 *
	 * Protected by bmc_reg_mutex.
	 */
	void (*null_user_handler)(struct ipmi_smi *intf,
				  struct ipmi_recv_msg *msg);

	/*
	 * When we are scanning the channels for an SMI, this will
	 * tell which channel we are scanning.
	 */
	int curr_channel;

	/* Channel information */
	struct ipmi_channel_set *channel_list;
	unsigned int curr_working_cset; /* First index into the following. */
	struct ipmi_channel_set wchannels[2];
	struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
	bool channels_ready;

	atomic_t stats[IPMI_NUM_STATS];

	/*
	 * run_to_completion duplicate of smb_info, smi_info
	 * and ipmi_serial_info structures. Used to decrease the number
	 * of parameters passed by "low" level IPMI code.
	 */
	int run_to_completion;
};
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)

static void __get_guid(struct ipmi_smi *intf);
static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
static int __ipmi_bmc_register(struct ipmi_smi *intf,
			       struct ipmi_device_id *id,
			       bool guid_set, guid_t *guid, int intf_num);
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);


/*
 * The driver model view of the IPMI messaging driver.
 */
static struct platform_driver ipmidriver = {
	.driver = {
		.name = "ipmi",
		.bus = &platform_bus_type
	}
};
/*
 * This mutex keeps us from adding the same BMC twice.
 */
static DEFINE_MUTEX(ipmidriver_mutex);

static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);
#define ipmi_interfaces_mutex_held() \
	lockdep_is_held(&ipmi_interfaces_mutex)
static struct srcu_struct ipmi_interfaces_srcu;

/*
 * List of watchers that want to know when SMIs are added and deleted.
 */
static LIST_HEAD(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);

#define ipmi_inc_stat(intf, stat) \
	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
#define ipmi_get_stat(intf, stat) \
	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
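
/*
 * For example, ipmi_inc_stat(intf, sent_ipmb_commands) expands to
 * atomic_inc(&intf->stats[IPMI_STAT_sent_ipmb_commands]), so the stat
 * argument must be one of the suffixes of the enum ipmi_stat_indexes
 * entries above.
 */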

static const char * const addr_src_to_str[] = {
	"invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
	"device-tree", "platform"
};

const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
{
	if (src >= SI_LAST)
		src = 0; /* Invalid */
	return addr_src_to_str[src];
}
EXPORT_SYMBOL(ipmi_addr_src_to_str);

static int is_lan_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_LAN_ADDR_TYPE;
}

static int is_ipmb_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
}

static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
}

static void free_recv_msg_list(struct list_head *q)
{
	struct ipmi_recv_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_recv_msg(msg);
	}
}

static void free_smi_msg_list(struct list_head *q)
{
	struct ipmi_smi_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_smi_msg(msg);
	}
}

static void clean_up_interface_data(struct ipmi_smi *intf)
{
	int i;
	struct cmd_rcvr *rcvr, *rcvr2;
	struct list_head list;

	tasklet_kill(&intf->recv_tasklet);

	free_smi_msg_list(&intf->waiting_rcv_msgs);
	free_recv_msg_list(&intf->waiting_events);

	/*
	 * Wholesale remove all the entries from the list in the
	 * interface and wait for RCU to know that none are in use.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	INIT_LIST_HEAD(&list);
	list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
	mutex_unlock(&intf->cmd_rcvrs_mutex);

	list_for_each_entry_safe(rcvr, rcvr2, &list, link)
		kfree(rcvr);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if ((intf->seq_table[i].inuse)
		    && (intf->seq_table[i].recv_msg))
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
	}
}

static void intf_free(struct kref *ref)
{
	struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);

	clean_up_interface_data(intf);
	kfree(intf);
}

struct watcher_entry {
	int intf_num;
	struct ipmi_smi *intf;
	struct list_head link;
};

int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
	struct ipmi_smi *intf;
	int index, rv;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	mutex_lock(&smi_watchers_mutex);

	list_add(&watcher->link, &smi_watchers);

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		int intf_num = READ_ONCE(intf->intf_num);

		if (intf_num == -1)
			continue;
		watcher->new_smi(intf_num, intf->si_dev);
	}
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	mutex_unlock(&smi_watchers_mutex);

	return 0;
}
EXPORT_SYMBOL(ipmi_smi_watcher_register);
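
/*
 * A minimal watcher registration, as a sketch of how a client of this
 * interface might use it (names are illustrative, not real callers):
 *
 *   static void my_new_smi(int if_num, struct device *dev) { ... }
 *   static void my_smi_gone(int if_num) { ... }
 *
 *   static struct ipmi_smi_watcher my_watcher = {
 *           .owner    = THIS_MODULE,
 *           .new_smi  = my_new_smi,
 *           .smi_gone = my_smi_gone,
 *   };
 *
 *   rv = ipmi_smi_watcher_register(&my_watcher);
 *
 * As the loop above shows, new_smi() is invoked immediately for every
 * interface that already exists at registration time.
 */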

int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
{
	mutex_lock(&smi_watchers_mutex);
	list_del(&watcher->link);
	mutex_unlock(&smi_watchers_mutex);
	return 0;
}
EXPORT_SYMBOL(ipmi_smi_watcher_unregister);

/*
 * Takes smi_watchers_mutex itself, so it must not be called with that
 * mutex already held.
 */
static void
call_smi_watchers(int i, struct device *dev)
{
	struct ipmi_smi_watcher *w;

	mutex_lock(&smi_watchers_mutex);
	list_for_each_entry(w, &smi_watchers, link) {
		if (try_module_get(w->owner)) {
			w->new_smi(i, dev);
			module_put(w->owner);
		}
	}
	mutex_unlock(&smi_watchers_mutex);
}

static int
ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
{
	if (addr1->addr_type != addr2->addr_type)
		return 0;

	if (addr1->channel != addr2->channel)
		return 0;

	if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		struct ipmi_system_interface_addr *smi_addr1
			= (struct ipmi_system_interface_addr *) addr1;
		struct ipmi_system_interface_addr *smi_addr2
			= (struct ipmi_system_interface_addr *) addr2;
		return (smi_addr1->lun == smi_addr2->lun);
	}

	if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
		struct ipmi_ipmb_addr *ipmb_addr1
			= (struct ipmi_ipmb_addr *) addr1;
		struct ipmi_ipmb_addr *ipmb_addr2
			= (struct ipmi_ipmb_addr *) addr2;

		return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
			&& (ipmb_addr1->lun == ipmb_addr2->lun));
	}

	if (is_lan_addr(addr1)) {
		struct ipmi_lan_addr *lan_addr1
			= (struct ipmi_lan_addr *) addr1;
		struct ipmi_lan_addr *lan_addr2
			= (struct ipmi_lan_addr *) addr2;

		return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
			&& (lan_addr1->local_SWID == lan_addr2->local_SWID)
			&& (lan_addr1->session_handle
			    == lan_addr2->session_handle)
			&& (lan_addr1->lun == lan_addr2->lun));
	}

	return 1;
}

int ipmi_validate_addr(struct ipmi_addr *addr, int len)
{
	if (len < sizeof(struct ipmi_system_interface_addr))
		return -EINVAL;

	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		if (addr->channel != IPMI_BMC_CHANNEL)
			return -EINVAL;
		return 0;
	}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) if ((addr->channel == IPMI_BMC_CHANNEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) || (addr->channel >= IPMI_MAX_CHANNELS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) || (addr->channel < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (len < sizeof(struct ipmi_ipmb_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (is_lan_addr(addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (len < sizeof(struct ipmi_lan_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) EXPORT_SYMBOL(ipmi_validate_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) unsigned int ipmi_addr_length(int addr_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) return sizeof(struct ipmi_system_interface_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if ((addr_type == IPMI_IPMB_ADDR_TYPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) return sizeof(struct ipmi_ipmb_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (addr_type == IPMI_LAN_ADDR_TYPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) return sizeof(struct ipmi_lan_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) EXPORT_SYMBOL(ipmi_addr_length);
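/*
 * Example: building an address for the local BMC and checking it with the
 * helpers above.  A minimal sketch for illustration only.
 *
 *	struct ipmi_system_interface_addr si = {
 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *		.channel   = IPMI_BMC_CHANNEL,
 *		.lun       = 0,
 *	};
 *	unsigned int len;
 *
 *	if (ipmi_validate_addr((struct ipmi_addr *) &si, sizeof(si)))
 *		return -EINVAL;
 *	len = ipmi_addr_length(si.addr_type);	returns sizeof(si) here
 */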
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) int rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) if (!msg->user) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) /* Special handling for NULL users. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (intf->null_user_handler) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) intf->null_user_handler(intf, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) /* No handler, so give up. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) rv = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) ipmi_free_recv_msg(msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) } else if (oops_in_progress) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) * If we are running in the panic context, calling the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) * receive handler doesn't have much meaning and carries a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * deadlock risk, so simply skip it in that case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) ipmi_free_recv_msg(msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (user) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) user->handler->ipmi_recv_hndl(msg, user->handler_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) release_ipmi_user(user, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) /* User went away, give up. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) ipmi_free_recv_msg(msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) rv = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) static void deliver_local_response(struct ipmi_smi *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) struct ipmi_recv_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) if (deliver_response(intf, msg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) ipmi_inc_stat(intf, unhandled_local_responses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) ipmi_inc_stat(intf, handled_local_responses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) static void deliver_err_response(struct ipmi_smi *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) struct ipmi_recv_msg *msg, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) msg->msg_data[0] = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) msg->msg.netfn |= 1; /* Convert to a response. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) msg->msg.data_len = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) msg->msg.data = msg->msg_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) deliver_local_response(intf, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) unsigned long iflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (!intf->handlers->set_need_watch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) spin_lock_irqsave(&intf->watch_lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) intf->response_waiters++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) intf->watchdog_waiters++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) intf->command_waiters++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if ((intf->last_watch_mask & flags) != flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) intf->last_watch_mask |= flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) intf->handlers->set_need_watch(intf->send_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) intf->last_watch_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) spin_unlock_irqrestore(&intf->watch_lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) unsigned long iflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) if (!intf->handlers->set_need_watch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) spin_lock_irqsave(&intf->watch_lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) intf->response_waiters--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) intf->watchdog_waiters--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) intf->command_waiters--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (intf->response_waiters)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (intf->watchdog_waiters)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) if (intf->command_waiters)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if (intf->last_watch_mask != flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) intf->last_watch_mask = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) intf->handlers->set_need_watch(intf->send_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) intf->last_watch_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) spin_unlock_irqrestore(&intf->watch_lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * Find the next sequence number not being used and add the given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) * message with the given timeout to the sequence table. This must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) * called with the interface's seq_lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) static int intf_next_seq(struct ipmi_smi *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) struct ipmi_recv_msg *recv_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) unsigned long timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) int retries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) int broadcast,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) unsigned char *seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) long *seqid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) int rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) if (timeout == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) timeout = default_retry_ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) if (retries < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) retries = default_max_retries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) i = (i+1)%IPMI_IPMB_NUM_SEQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) if (!intf->seq_table[i].inuse)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (!intf->seq_table[i].inuse) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) intf->seq_table[i].recv_msg = recv_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) * Start with the maximum timeout; when the send response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) * comes in we will start the real timer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) intf->seq_table[i].orig_timeout = timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) intf->seq_table[i].retries_left = retries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) intf->seq_table[i].broadcast = broadcast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) intf->seq_table[i].inuse = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) *seq = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) *seqid = intf->seq_table[i].seqid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) need_waiter(intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) rv = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) }
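/*
 * Typical use of the sequence table helpers (a condensed sketch of what
 * the IPMB send path in this file does): allocate a sequence entry under
 * seq_lock, encode it into the message id, and later start the real timer
 * or report an error against that id.  STORE_SEQ_IN_MSGID() is assumed to
 * be the companion of the GET_SEQ_FROM_MSGID() macro used below.
 *
 *	unsigned char seq;
 *	long seqid;
 *
 *	spin_lock_irqsave(&intf->seq_lock, flags);
 *	rv = intf_next_seq(intf, recv_msg, timeout_ms, retries,
 *			   0, &seq, &seqid);
 *	if (!rv)
 *		smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
 *	spin_unlock_irqrestore(&intf->seq_lock, flags);
 */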
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) * Return the receive message for the given sequence number and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) * release the sequence number so it can be reused. Some other data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) * is passed in to be sure the message matches up correctly (to help
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) * guard against messages coming in after their timeout and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) * sequence number being reused).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) static int intf_find_seq(struct ipmi_smi *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) unsigned char seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) short channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) unsigned char cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) unsigned char netfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) struct ipmi_addr *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) struct ipmi_recv_msg **recv_msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) int rv = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) if (seq >= IPMI_IPMB_NUM_SEQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) spin_lock_irqsave(&intf->seq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (intf->seq_table[seq].inuse) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) && (msg->msg.netfn == netfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) && (ipmi_addr_equal(addr, &msg->addr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) *recv_msg = msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) intf->seq_table[seq].inuse = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) spin_unlock_irqrestore(&intf->seq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) /* Start the timer for a specific sequence table entry. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) static int intf_start_seq_timer(struct ipmi_smi *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) long msgid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) int rv = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) unsigned char seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) unsigned long seqid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) GET_SEQ_FROM_MSGID(msgid, seq, seqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) spin_lock_irqsave(&intf->seq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) * We do this verification because the user can be deleted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) * while a message is outstanding.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) if ((intf->seq_table[seq].inuse)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) && (intf->seq_table[seq].seqid == seqid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) struct seq_table *ent = &intf->seq_table[seq];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) ent->timeout = ent->orig_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) spin_unlock_irqrestore(&intf->seq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) /* Got an error for the send message for a specific sequence number. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) static int intf_err_seq(struct ipmi_smi *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) long msgid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) unsigned int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) int rv = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) unsigned char seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) unsigned long seqid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) struct ipmi_recv_msg *msg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) GET_SEQ_FROM_MSGID(msgid, seq, seqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) spin_lock_irqsave(&intf->seq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) * We do this verification because the user can be deleted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * while a message is outstanding.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) if ((intf->seq_table[seq].inuse)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) && (intf->seq_table[seq].seqid == seqid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) struct seq_table *ent = &intf->seq_table[seq];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) ent->inuse = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) msg = ent->recv_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) spin_unlock_irqrestore(&intf->seq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) if (msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) deliver_err_response(intf, msg, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) static void free_user_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) struct ipmi_user *user = container_of(work, struct ipmi_user,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) remove_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) cleanup_srcu_struct(&user->release_barrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) vfree(user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) int ipmi_create_user(unsigned int if_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) const struct ipmi_user_hndl *handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) void *handler_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) struct ipmi_user **user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) struct ipmi_user *new_user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) int rv, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) struct ipmi_smi *intf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) * There is no module use count here, because it's not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) * required. Since this can only be used by and called from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) * other modules, they implicitly use this module, and thus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) * it can't be removed unless those other modules are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * removed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) if (handler == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) * Make sure the driver is actually initialized; this handles
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) * problems with initialization order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) rv = ipmi_init_msghandler();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) new_user = vzalloc(sizeof(*new_user));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) if (!new_user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) index = srcu_read_lock(&ipmi_interfaces_srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (intf->intf_num == if_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) goto found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) /* Not found, return an error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) rv = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) goto out_kfree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) found:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) INIT_WORK(&new_user->remove_work, free_user_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) rv = init_srcu_struct(&new_user->release_barrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) goto out_kfree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) if (!try_module_get(intf->owner)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) rv = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) goto out_kfree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) /* Note that each existing user holds a refcount to the interface. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) kref_get(&intf->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) kref_init(&new_user->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) new_user->handler = handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) new_user->handler_data = handler_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) new_user->intf = intf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) new_user->gets_events = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) rcu_assign_pointer(new_user->self, new_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) spin_lock_irqsave(&intf->seq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) list_add_rcu(&new_user->link, &intf->users);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) spin_unlock_irqrestore(&intf->seq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) if (handler->ipmi_watchdog_pretimeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) /* User wants pretimeouts, so make sure to watch for them. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) srcu_read_unlock(&ipmi_interfaces_srcu, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) *user = new_user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) out_kfree:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) srcu_read_unlock(&ipmi_interfaces_srcu, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) vfree(new_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) EXPORT_SYMBOL(ipmi_create_user);
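/*
 * Example: how a kernel client typically obtains and releases a user.
 * A minimal sketch with hypothetical my_* names; interface number 0 is
 * assumed.  The receive handler owns the message and must free it with
 * ipmi_free_recv_msg() when it is done with it.
 *
 *	static void my_recv(struct ipmi_recv_msg *msg, void *data)
 *	{
 *		...inspect msg->recv_type, msg->msg and msg->msg_data here...
 *		ipmi_free_recv_msg(msg);
 *	}
 *
 *	static const struct ipmi_user_hndl my_hndl = {
 *		.ipmi_recv_hndl = my_recv,
 *	};
 *
 *	struct ipmi_user *user;
 *	int rv = ipmi_create_user(0, &my_hndl, NULL, &user);
 *	if (rv)
 *		return rv;
 *	...
 *	ipmi_destroy_user(user);
 */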
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) int rv, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) struct ipmi_smi *intf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) index = srcu_read_lock(&ipmi_interfaces_srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) if (intf->intf_num == if_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) goto found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) srcu_read_unlock(&ipmi_interfaces_srcu, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) /* Not found, return an error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) found:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) if (!intf->handlers->get_smi_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) rv = -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) rv = intf->handlers->get_smi_info(intf->send_info, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) srcu_read_unlock(&ipmi_interfaces_srcu, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) EXPORT_SYMBOL(ipmi_get_smi_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) static void free_user(struct kref *ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) /* SRCU cleanup must happen in task context. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) queue_work(remove_work_wq, &user->remove_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) static void _ipmi_destroy_user(struct ipmi_user *user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) struct ipmi_smi *intf = user->intf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) struct cmd_rcvr *rcvr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) struct cmd_rcvr *rcvrs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) if (!acquire_ipmi_user(user, &i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) * The user has already been cleaned up; just make sure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) * nothing is using it and return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) synchronize_srcu(&user->release_barrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) rcu_assign_pointer(user->self, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) release_ipmi_user(user, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) synchronize_srcu(&user->release_barrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) if (user->handler->shutdown)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) user->handler->shutdown(user->handler_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) if (user->handler->ipmi_watchdog_pretimeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (user->gets_events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) atomic_dec(&intf->event_waiters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) /* Remove the user from the interface's sequence table. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) spin_lock_irqsave(&intf->seq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) list_del_rcu(&user->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) if (intf->seq_table[i].inuse
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) && (intf->seq_table[i].recv_msg->user == user)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) intf->seq_table[i].inuse = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) spin_unlock_irqrestore(&intf->seq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) * Remove the user from the command receiver's table. First
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) * we build a list of everything (not using the standard link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) * since other things may be using it until we do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) * synchronize_rcu()), then free everything in that list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) mutex_lock(&intf->cmd_rcvrs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) if (rcvr->user == user) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) list_del_rcu(&rcvr->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) rcvr->next = rcvrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) rcvrs = rcvr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) mutex_unlock(&intf->cmd_rcvrs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) while (rcvrs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) rcvr = rcvrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) rcvrs = rcvr->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) kfree(rcvr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) kref_put(&intf->refcount, intf_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) module_put(intf->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) int ipmi_destroy_user(struct ipmi_user *user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) _ipmi_destroy_user(user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) kref_put(&user->refcount, free_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) EXPORT_SYMBOL(ipmi_destroy_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) int ipmi_get_version(struct ipmi_user *user,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) unsigned char *major,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) unsigned char *minor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) struct ipmi_device_id id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) int rv, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) user = acquire_ipmi_user(user, &index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) if (!user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) if (!rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) *major = ipmi_version_major(&id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) *minor = ipmi_version_minor(&id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) release_ipmi_user(user, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) EXPORT_SYMBOL(ipmi_get_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) int ipmi_set_my_address(struct ipmi_user *user,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) unsigned int channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) unsigned char address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) int index, rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) user = acquire_ipmi_user(user, &index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) if (!user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) if (channel >= IPMI_MAX_CHANNELS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) rv = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) user->intf->addrinfo[channel].address = address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) release_ipmi_user(user, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) EXPORT_SYMBOL(ipmi_set_my_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) int ipmi_get_my_address(struct ipmi_user *user,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) unsigned int channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) unsigned char *address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) int index, rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) user = acquire_ipmi_user(user, &index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) if (!user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) if (channel >= IPMI_MAX_CHANNELS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) rv = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) *address = user->intf->addrinfo[channel].address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) release_ipmi_user(user, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) EXPORT_SYMBOL(ipmi_get_my_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) int ipmi_set_my_LUN(struct ipmi_user *user,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) unsigned int channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) unsigned char LUN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) int index, rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) user = acquire_ipmi_user(user, &index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) if (!user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) if (channel >= IPMI_MAX_CHANNELS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) rv = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) user->intf->addrinfo[channel].lun = LUN & 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) release_ipmi_user(user, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) EXPORT_SYMBOL(ipmi_set_my_LUN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) int ipmi_get_my_LUN(struct ipmi_user *user,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) unsigned int channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) unsigned char *address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) int index, rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) user = acquire_ipmi_user(user, &index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) if (!user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) if (channel >= IPMI_MAX_CHANNELS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) rv = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) *address = user->intf->addrinfo[channel].lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) release_ipmi_user(user, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) EXPORT_SYMBOL(ipmi_get_my_LUN);
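/*
 * Example: overriding the source slave address and LUN used on channel 0.
 * A minimal sketch; most users can rely on the defaults.  0x20 is the
 * conventional BMC slave address and the LUN is masked to two bits.
 *
 *	rv = ipmi_set_my_address(user, 0, 0x20);
 *	if (!rv)
 *		rv = ipmi_set_my_LUN(user, 0, 2);
 */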
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) int ipmi_get_maintenance_mode(struct ipmi_user *user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) int mode, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) user = acquire_ipmi_user(user, &index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) if (!user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) mode = user->intf->maintenance_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) release_ipmi_user(user, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) return mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) EXPORT_SYMBOL(ipmi_get_maintenance_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) static void maintenance_mode_update(struct ipmi_smi *intf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) if (intf->handlers->set_maintenance_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) intf->handlers->set_maintenance_mode(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) intf->send_info, intf->maintenance_mode_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) int rv = 0, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) struct ipmi_smi *intf = user->intf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) user = acquire_ipmi_user(user, &index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) if (!user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) if (intf->maintenance_mode != mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) switch (mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) case IPMI_MAINTENANCE_MODE_AUTO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) intf->maintenance_mode_enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) = (intf->auto_maintenance_timeout > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) case IPMI_MAINTENANCE_MODE_OFF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) intf->maintenance_mode_enable = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) case IPMI_MAINTENANCE_MODE_ON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) intf->maintenance_mode_enable = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) rv = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) intf->maintenance_mode = mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) maintenance_mode_update(intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) release_ipmi_user(user, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) EXPORT_SYMBOL(ipmi_set_maintenance_mode);
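/*
 * Example: forcing maintenance mode on around maintenance-sensitive work
 * and restoring automatic handling afterwards.  A minimal sketch of the
 * API above.
 *
 *	rv = ipmi_set_maintenance_mode(user, IPMI_MAINTENANCE_MODE_ON);
 *	...do the maintenance-sensitive work...
 *	ipmi_set_maintenance_mode(user, IPMI_MAINTENANCE_MODE_AUTO);
 */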
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) int ipmi_set_gets_events(struct ipmi_user *user, bool val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) struct ipmi_smi *intf = user->intf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) struct ipmi_recv_msg *msg, *msg2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) struct list_head msgs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) user = acquire_ipmi_user(user, &index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) if (!user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) INIT_LIST_HEAD(&msgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) spin_lock_irqsave(&intf->events_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) if (user->gets_events == val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) user->gets_events = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) if (val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) if (atomic_inc_return(&intf->event_waiters) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) need_waiter(intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) atomic_dec(&intf->event_waiters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) if (intf->delivering_events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) * Another thread is delivering events for this, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) * let it handle any new events.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) /* Deliver any queued events. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) while (user->gets_events && !list_empty(&intf->waiting_events)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) list_move_tail(&msg->link, &msgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) intf->waiting_events_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) if (intf->event_msg_printed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) dev_warn(intf->si_dev, "Event queue no longer full\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) intf->event_msg_printed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) intf->delivering_events = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) spin_unlock_irqrestore(&intf->events_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) list_for_each_entry_safe(msg, msg2, &msgs, link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) msg->user = user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) kref_get(&user->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) deliver_local_response(intf, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) spin_lock_irqsave(&intf->events_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) intf->delivering_events = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) spin_unlock_irqrestore(&intf->events_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) release_ipmi_user(user, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) EXPORT_SYMBOL(ipmi_set_gets_events);
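/*
 * Example: enabling event delivery for a user.  A minimal sketch; once
 * enabled, queued events (and any future ones) are handed to the user's
 * ipmi_recv_hndl() with recv_type set to IPMI_ASYNC_EVENT_RECV_TYPE.
 *
 *	rv = ipmi_set_gets_events(user, true);
 *	...
 *	ipmi_set_gets_events(user, false);
 */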
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) unsigned char netfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) unsigned char cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) unsigned char chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) struct cmd_rcvr *rcvr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) && (rcvr->chans & (1 << chan)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) return rcvr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) unsigned char netfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) unsigned char cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) unsigned int chans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) struct cmd_rcvr *rcvr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) && (rcvr->chans & chans))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
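/*
 * Illustrative use from a hypothetical client (the netfn/cmd values
 * are made up): to receive OEM netfn 0x30, command 0x42 on channels
 * 0 and 1,
 *
 *	rv = ipmi_register_for_cmd(user, 0x30, 0x42, BIT(0) | BIT(1));
 *
 * and ipmi_unregister_for_cmd() with the same arguments undoes the
 * registration.
 */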
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) int ipmi_register_for_cmd(struct ipmi_user *user,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) unsigned char netfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) unsigned char cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) unsigned int chans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) struct ipmi_smi *intf = user->intf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) struct cmd_rcvr *rcvr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) int rv = 0, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) user = acquire_ipmi_user(user, &index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) if (!user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) if (!rcvr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) rv = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) goto out_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) rcvr->cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) rcvr->netfn = netfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) rcvr->chans = chans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) rcvr->user = user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) mutex_lock(&intf->cmd_rcvrs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) /* Make sure the command/netfn is not already registered. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) rv = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) mutex_unlock(&intf->cmd_rcvrs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) kfree(rcvr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) out_release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) release_ipmi_user(user, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) EXPORT_SYMBOL(ipmi_register_for_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) int ipmi_unregister_for_cmd(struct ipmi_user *user,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) unsigned char netfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) unsigned char cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) unsigned int chans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) struct ipmi_smi *intf = user->intf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) struct cmd_rcvr *rcvr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) struct cmd_rcvr *rcvrs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) int i, rv = -ENOENT, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) user = acquire_ipmi_user(user, &index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) if (!user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) mutex_lock(&intf->cmd_rcvrs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) if (((1 << i) & chans) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) if (rcvr == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) if (rcvr->user == user) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) rcvr->chans &= ~chans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) if (rcvr->chans == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) list_del_rcu(&rcvr->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) rcvr->next = rcvrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) rcvrs = rcvr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) mutex_unlock(&intf->cmd_rcvrs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) release_ipmi_user(user, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) while (rcvrs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) rcvr = rcvrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) rcvrs = rcvr->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) kfree(rcvr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) EXPORT_SYMBOL(ipmi_unregister_for_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)
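/*
 * IPMB checksums are two's complement: the checksum byte is chosen so
 * that the modulo-256 sum of the covered bytes plus the checksum is
 * zero.  For example (illustrative values only):
 *
 *	data = { 0x20, 0x06 };
 *	csum = ipmb_checksum(data, 2);     0x100 - (0x20 + 0x06) = 0xda
 *	(0x20 + 0x06 + 0xda) & 0xff == 0   the receiver's validity check
 */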
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) static unsigned char
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) ipmb_checksum(unsigned char *data, int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) unsigned char csum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) for (; size > 0; size--, data++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) csum += *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) return -csum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
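/*
 * Wrap an IPMB request in a Send Message command for the BMC.  The
 * SMI payload built below (i is 1 for a broadcast, otherwise 0) is:
 *
 *	data[0]      netfn (App request) << 2
 *	data[1]      Send Message command
 *	data[2]      channel
 *	data[3]      0x00 broadcast slave address (broadcast only)
 *	data[i+3]    responder slave address
 *	data[i+4]    netfn << 2 | responder LUN
 *	data[i+5]    checksum over the previous two bytes
 *	data[i+6]    requester slave address
 *	data[i+7]    sequence number << 2 | requester LUN
 *	data[i+8]    command
 *	data[i+9..]  message data, followed by a second checksum
 */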
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) struct kernel_ipmi_msg *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) struct ipmi_ipmb_addr *ipmb_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) long msgid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) unsigned char ipmb_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) int broadcast,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) unsigned char source_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) unsigned char source_lun)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) int i = broadcast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) /* Format the IPMB header data. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) smi_msg->data[1] = IPMI_SEND_MSG_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) smi_msg->data[2] = ipmb_addr->channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) if (broadcast)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) smi_msg->data[3] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) smi_msg->data[i+3] = ipmb_addr->slave_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) smi_msg->data[i+6] = source_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) smi_msg->data[i+8] = msg->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) /* Now tack the data onto the message. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) if (msg->data_len > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) smi_msg->data_size = msg->data_len + 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) /* Now calculate the checksum and tack it on. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) smi_msg->data[i+smi_msg->data_size]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) = ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) * Add on the checksum size and the offset from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) * broadcast.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) smi_msg->data_size += 1 + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) smi_msg->msgid = msgid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)
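/*
 * Same idea as format_ipmb_msg(), but for a LAN bridged message: the
 * session handle and software IDs replace the IPMB slave addresses,
 * and there is never a broadcast offset.
 */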
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) struct kernel_ipmi_msg *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) struct ipmi_lan_addr *lan_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) long msgid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) unsigned char ipmb_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) unsigned char source_lun)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) /* Format the IPMB header data. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) smi_msg->data[1] = IPMI_SEND_MSG_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) smi_msg->data[2] = lan_addr->channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) smi_msg->data[3] = lan_addr->session_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) smi_msg->data[4] = lan_addr->remote_SWID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) smi_msg->data[7] = lan_addr->local_SWID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) smi_msg->data[9] = msg->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) /* Now tack the data onto the message. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) if (msg->data_len > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) memcpy(&smi_msg->data[10], msg->data, msg->data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) smi_msg->data_size = msg->data_len + 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) /* Now calculate the checksum and tack it on. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) smi_msg->data[smi_msg->data_size]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) = ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) /* Add on the checksum size. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) smi_msg->data_size += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) smi_msg->msgid = msgid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)
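/*
 * Queue a message for transmission, or hand it back for immediate
 * sending.  Returns the message if the interface was idle (the caller
 * must pass it to the lower layer), or NULL if it was queued behind
 * intf->curr_msg.  smi_send() below serializes this with
 * xmit_msgs_lock except in run-to-completion mode.
 */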
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) struct ipmi_smi_msg *smi_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) int priority)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) if (intf->curr_msg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) if (priority > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) list_add_tail(&smi_msg->link, &intf->xmit_msgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) smi_msg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) intf->curr_msg = smi_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) return smi_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) static void smi_send(struct ipmi_smi *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) const struct ipmi_smi_handlers *handlers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) struct ipmi_smi_msg *smi_msg, int priority)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) int run_to_completion = intf->run_to_completion;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) if (!run_to_completion)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) smi_msg = smi_add_send_msg(intf, smi_msg, priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) if (!run_to_completion)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) if (smi_msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) handlers->sender(intf->send_info, smi_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)
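/*
 * Cold reset, warm reset, and any firmware netfn request put the
 * interface into maintenance mode, where different retry defaults
 * apply while the BMC may be resetting (see the IPMB path below).
 */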
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) && ((msg->cmd == IPMI_COLD_RESET_CMD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) || (msg->cmd == IPMI_WARM_RESET_CMD)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858)
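/*
 * Build a request addressed to the system interface (the local BMC)
 * into smi_msg.  Responses are rejected, as are the Send Message,
 * Get Message, and Read Event Message Buffer commands, since the
 * driver manages that sequencing itself.
 */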
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) struct ipmi_addr *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) long msgid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) struct kernel_ipmi_msg *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) struct ipmi_smi_msg *smi_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) struct ipmi_recv_msg *recv_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) int retries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) unsigned int retry_time_ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) struct ipmi_system_interface_addr *smi_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) if (msg->netfn & 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) /* Responses are not allowed to the SMI. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) smi_addr = (struct ipmi_system_interface_addr *) addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) if (smi_addr->lun > 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) ipmi_inc_stat(intf, sent_invalid_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) && ((msg->cmd == IPMI_SEND_MSG_CMD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) || (msg->cmd == IPMI_GET_MSG_CMD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) * We don't let the user do these, since we manage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) * the sequence numbers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) ipmi_inc_stat(intf, sent_invalid_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) if (is_maintenance_mode_cmd(msg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) intf->auto_maintenance_timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) = maintenance_mode_timeout_ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) if (!intf->maintenance_mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) && !intf->maintenance_mode_enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) intf->maintenance_mode_enable = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) maintenance_mode_update(intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) spin_unlock_irqrestore(&intf->maintenance_mode_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) ipmi_inc_stat(intf, sent_invalid_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) smi_msg->data[1] = msg->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) smi_msg->msgid = msgid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) smi_msg->user_data = recv_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) if (msg->data_len > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) memcpy(&smi_msg->data[2], msg->data, msg->data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) smi_msg->data_size = msg->data_len + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) ipmi_inc_stat(intf, sent_local_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)
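/*
 * Build a request for an IPMB (or broadcast IPMB) destination.  A
 * response reuses the caller's msgid; a command allocates a sequence
 * number so the reply can be matched up and the message retransmitted
 * from recv_msg on a timeout.
 */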
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) struct ipmi_addr *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) long msgid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) struct kernel_ipmi_msg *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) struct ipmi_smi_msg *smi_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) struct ipmi_recv_msg *recv_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) unsigned char source_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) unsigned char source_lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) int retries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) unsigned int retry_time_ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) struct ipmi_ipmb_addr *ipmb_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) unsigned char ipmb_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) long seqid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) int broadcast = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) struct ipmi_channel *chans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) int rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) if (addr->channel >= IPMI_MAX_CHANNELS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) ipmi_inc_stat(intf, sent_invalid_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) chans = READ_ONCE(intf->channel_list)->c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) ipmi_inc_stat(intf, sent_invalid_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) * A broadcast adds a zero at the beginning of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) * message, but is otherwise the same as an IPMB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) * address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) addr->addr_type = IPMI_IPMB_ADDR_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) broadcast = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) retries = 0; /* Don't retry broadcasts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) * 9 for the header and 1 for the checksum, plus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) * possibly one for the broadcast.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) ipmi_inc_stat(intf, sent_invalid_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) ipmb_addr = (struct ipmi_ipmb_addr *) addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) if (ipmb_addr->lun > 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) ipmi_inc_stat(intf, sent_invalid_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) if (recv_msg->msg.netfn & 0x1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) * It's a response, so use the user's sequence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) * from msgid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) ipmi_inc_stat(intf, sent_ipmb_responses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) msgid, broadcast,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) source_address, source_lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) * Save the receive message so we can use it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) * to deliver the response.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) smi_msg->user_data = recv_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) /* It's a command, so get a sequence for it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) spin_lock_irqsave(&intf->seq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) if (is_maintenance_mode_cmd(msg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) intf->ipmb_maintenance_mode_timeout =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) maintenance_mode_timeout_ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) /* Different default in maintenance mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) retry_time_ms = default_maintenance_retry_ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) * Create a sequence number, using the retry count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) * and timeout supplied by the caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) rv = intf_next_seq(intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) recv_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) retry_time_ms,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) retries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) broadcast,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) &ipmb_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) &seqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) * We have probably used up all the sequence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) * numbers, so abort.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) ipmi_inc_stat(intf, sent_ipmb_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) * Store the sequence number in the message,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) * so that when the send message response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) * comes back we can start the timer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) format_ipmb_msg(smi_msg, msg, ipmb_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) ipmb_seq, broadcast,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) source_address, source_lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) * Copy the message into the recv message data, so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) * can retransmit it later if necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) memcpy(recv_msg->msg_data, smi_msg->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) smi_msg->data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) recv_msg->msg.data = recv_msg->msg_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) recv_msg->msg.data_len = smi_msg->data_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) * We don't unlock until here, because we need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) * to copy the completed message into the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) * recv_msg before we release the lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) * Otherwise, once the sequence number is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) * allocated, the timeout/retransmit code could
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) * see a partially filled recv_msg.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) spin_unlock_irqrestore(&intf->seq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)
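/*
 * Build a request for a LAN destination.  This mirrors the IPMB path
 * above, minus broadcast handling.
 */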
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) static int i_ipmi_req_lan(struct ipmi_smi *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) struct ipmi_addr *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) long msgid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) struct kernel_ipmi_msg *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) struct ipmi_smi_msg *smi_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) struct ipmi_recv_msg *recv_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) unsigned char source_lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) int retries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) unsigned int retry_time_ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) struct ipmi_lan_addr *lan_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) unsigned char ipmb_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) long seqid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) struct ipmi_channel *chans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) int rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) if (addr->channel >= IPMI_MAX_CHANNELS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) ipmi_inc_stat(intf, sent_invalid_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) chans = READ_ONCE(intf->channel_list)->c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) if ((chans[addr->channel].medium
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) != IPMI_CHANNEL_MEDIUM_8023LAN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) && (chans[addr->channel].medium
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) != IPMI_CHANNEL_MEDIUM_ASYNC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) ipmi_inc_stat(intf, sent_invalid_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) /* 11 for the header and 1 for the checksum. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) ipmi_inc_stat(intf, sent_invalid_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) lan_addr = (struct ipmi_lan_addr *) addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) if (lan_addr->lun > 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) ipmi_inc_stat(intf, sent_invalid_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) if (recv_msg->msg.netfn & 0x1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) * It's a response, so use the user's sequence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) * from msgid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) ipmi_inc_stat(intf, sent_lan_responses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) format_lan_msg(smi_msg, msg, lan_addr, msgid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) msgid, source_lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) * Save the receive message so we can use it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) * to deliver the response.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) smi_msg->user_data = recv_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) /* It's a command, so get a sequence for it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) spin_lock_irqsave(&intf->seq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) * Create a sequence number, using the retry count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) * and timeout supplied by the caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) rv = intf_next_seq(intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) recv_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) retry_time_ms,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) retries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) &ipmb_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) &seqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) * We have probably used up all the sequence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) * numbers, so abort.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) ipmi_inc_stat(intf, sent_lan_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) * Store the sequence number in the message,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) * so that when the send message response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) * comes back we can start the timer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) format_lan_msg(smi_msg, msg, lan_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) ipmb_seq, source_lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) * Copy the message into the recv message data, so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) * can retransmit it later if necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) memcpy(recv_msg->msg_data, smi_msg->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) smi_msg->data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) recv_msg->msg.data = recv_msg->msg_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) recv_msg->msg.data_len = smi_msg->data_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) * We don't unlock until here, because we need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) * to copy the completed message into the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) * recv_msg before we release the lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) * Otherwise, once the sequence number is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) * allocated, the timeout/retransmit code could
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) * see a partially filled recv_msg.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) spin_unlock_irqrestore(&intf->seq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) * Separate from ipmi_request so that the user does not have to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) * supplied in certain circumstances (mainly at panic time). If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) * messages are supplied, they will be freed, even if an error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) * occurs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) static int i_ipmi_request(struct ipmi_user *user,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) struct ipmi_smi *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) struct ipmi_addr *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) long msgid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) struct kernel_ipmi_msg *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) void *user_msg_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) void *supplied_smi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) struct ipmi_recv_msg *supplied_recv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) int priority,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) unsigned char source_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) unsigned char source_lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) int retries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) unsigned int retry_time_ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) struct ipmi_smi_msg *smi_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) struct ipmi_recv_msg *recv_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) int rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) if (supplied_recv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) recv_msg = supplied_recv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) recv_msg = ipmi_alloc_recv_msg();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) if (recv_msg == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) rv = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) recv_msg->user_msg_data = user_msg_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) if (supplied_smi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) smi_msg = (struct ipmi_smi_msg *) supplied_smi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) smi_msg = ipmi_alloc_smi_msg();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) if (smi_msg == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) if (!supplied_recv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) ipmi_free_recv_msg(recv_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) rv = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) if (intf->in_shutdown) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) rv = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) recv_msg->user = user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) if (user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) /* The put happens when the message is freed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) kref_get(&user->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) recv_msg->msgid = msgid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) * Store the message to send in the receive message so timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) * responses can get the proper response data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) recv_msg->msg = *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) recv_msg, retries, retry_time_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) source_address, source_lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) retries, retry_time_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) } else if (is_lan_addr(addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) source_lun, retries, retry_time_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) /* Unknown address type. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) ipmi_inc_stat(intf, sent_invalid_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) rv = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) if (rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) ipmi_free_smi_msg(smi_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) ipmi_free_recv_msg(recv_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) pr_debug("Send: %*ph\n", smi_msg->data_size, smi_msg->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) smi_send(intf, intf->handlers, smi_msg, priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279)
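/*
 * Validate the channel in an address and report the requester slave
 * address and LUN configured for that channel.
 */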
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) static int check_addr(struct ipmi_smi *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) struct ipmi_addr *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) unsigned char *saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) unsigned char *lun)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) if (addr->channel >= IPMI_MAX_CHANNELS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) *lun = intf->addrinfo[addr->channel].lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) *saddr = intf->addrinfo[addr->channel].address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) int ipmi_request_settime(struct ipmi_user *user,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) struct ipmi_addr *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) long msgid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) struct kernel_ipmi_msg *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) void *user_msg_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) int priority,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) int retries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) unsigned int retry_time_ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) unsigned char saddr = 0, lun = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) int rv, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) if (!user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) user = acquire_ipmi_user(user, &index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) if (!user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) rv = check_addr(user->intf, addr, &saddr, &lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) if (!rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) rv = i_ipmi_request(user,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) user->intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) msgid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) user_msg_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) NULL, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) priority,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) retries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) retry_time_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) release_ipmi_user(user, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) EXPORT_SYMBOL(ipmi_request_settime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) int ipmi_request_supply_msgs(struct ipmi_user *user,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) struct ipmi_addr *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) long msgid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) struct kernel_ipmi_msg *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) void *user_msg_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) void *supplied_smi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) struct ipmi_recv_msg *supplied_recv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) int priority)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) unsigned char saddr = 0, lun = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) int rv, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) if (!user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) user = acquire_ipmi_user(user, &index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) if (!user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) rv = check_addr(user->intf, addr, &saddr, &lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) if (!rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) rv = i_ipmi_request(user,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) user->intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) msgid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) user_msg_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) supplied_smi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) supplied_recv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) priority,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) -1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) release_ipmi_user(user, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) EXPORT_SYMBOL(ipmi_request_supply_msgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370)
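/*
 * Completion handler for the driver's own Get Device ID request: parse
 * the response into intf->bmc->fetch_id (or record the failing
 * completion code) and wake up the waiter in __get_device_id().
 */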
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) static void bmc_device_id_handler(struct ipmi_smi *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) struct ipmi_recv_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) dev_warn(intf->si_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) if (rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) /* Record the completion code on error. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) intf->bmc->cc = msg->msg.data[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) intf->bmc->dyn_id_set = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) * Make sure the id data is available before setting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) * dyn_id_set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) intf->bmc->dyn_id_set = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) wake_up(&intf->waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403)
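/*
 * Send a Get Device ID command to the local BMC with no user; the
 * response is routed through intf->null_user_handler.
 */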
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) send_get_device_id_cmd(struct ipmi_smi *intf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) struct ipmi_system_interface_addr si;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) struct kernel_ipmi_msg msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) si.channel = IPMI_BMC_CHANNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) si.lun = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) msg.netfn = IPMI_NETFN_APP_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) msg.cmd = IPMI_GET_DEVICE_ID_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) msg.data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) msg.data_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) return i_ipmi_request(NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) (struct ipmi_addr *) &si,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) &msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) intf->addrinfo[0].address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) intf->addrinfo[0].lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) -1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432)
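/*
 * Synchronously fetch the device ID from the BMC, retrying a few times
 * on transient completion codes (firmware update or initialization in
 * progress, etc.).
 */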
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) unsigned int retry_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) intf->null_user_handler = bmc_device_id_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) bmc->cc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) bmc->dyn_id_set = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) rv = send_get_device_id_cmd(intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) goto out_reset_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) wait_event(intf->waitq, bmc->dyn_id_set != 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) if (!bmc->dyn_id_set) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) if ((bmc->cc == IPMI_DEVICE_IN_FW_UPDATE_ERR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) || bmc->cc == IPMI_DEVICE_IN_INIT_ERR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) || bmc->cc == IPMI_NOT_IN_MY_STATE_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) && ++retry_count <= GET_DEVICE_ID_MAX_RETRY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) msleep(500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) dev_warn(intf->si_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) "BMC returned 0x%2.2x, retry get bmc device id\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) bmc->cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) rv = -EIO; /* Something went wrong in the fetch. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) /* dyn_id_set makes the id data available. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) out_reset_handler:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) intf->null_user_handler = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) * Fetch the device id for the bmc/interface. You must pass in either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) * bmc or intf; this code will get the other one. If the data has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) * been recently fetched, this will just use the cached data. Otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) * it will run a new fetch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) * Except for the first time this is called (in ipmi_add_smi()),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) * this will always return good data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) struct ipmi_device_id *id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) bool *guid_set, guid_t *guid, int intf_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) int rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) int prev_dyn_id_set, prev_guid_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) bool intf_set = intf != NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) if (!intf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) mutex_lock(&bmc->dyn_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) retry_bmc_lock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) if (list_empty(&bmc->intfs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) mutex_unlock(&bmc->dyn_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) bmc_link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) kref_get(&intf->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) mutex_unlock(&bmc->dyn_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) mutex_lock(&intf->bmc_reg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) mutex_lock(&bmc->dyn_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) bmc_link)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) mutex_unlock(&intf->bmc_reg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) kref_put(&intf->refcount, intf_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) goto retry_bmc_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) mutex_lock(&intf->bmc_reg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) bmc = intf->bmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) mutex_lock(&bmc->dyn_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) kref_get(&intf->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) /* If we have a valid and current ID, just return that. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) if (intf->in_bmc_register ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) goto out_noprocessing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) prev_guid_set = bmc->dyn_guid_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) __get_guid(intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) prev_dyn_id_set = bmc->dyn_id_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) rv = __get_device_id(intf, bmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) * The guid, device id, manufacturer id, and product id should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) * not change on a BMC. If they do, we have to do some dancing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) if (!intf->bmc_registered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) || (!prev_guid_set && bmc->dyn_guid_set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) || (!prev_dyn_id_set && bmc->dyn_id_set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) || (prev_guid_set && bmc->dyn_guid_set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) && !guid_equal(&bmc->guid, &bmc->fetch_guid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) || bmc->id.device_id != bmc->fetch_id.device_id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) || bmc->id.product_id != bmc->fetch_id.product_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) struct ipmi_device_id id = bmc->fetch_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) int guid_set = bmc->dyn_guid_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) guid_t guid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) guid = bmc->fetch_guid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) mutex_unlock(&bmc->dyn_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) __ipmi_bmc_unregister(intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) /* Fill in the temporary BMC for good measure. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) intf->bmc->id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) intf->bmc->dyn_guid_set = guid_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) intf->bmc->guid = guid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) need_waiter(intf); /* Retry later on an error. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) __scan_channels(intf, &id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) if (!intf_set) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) * We weren't given a specific interface, so restart
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) * the operation on the next interface for the BMC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) mutex_unlock(&intf->bmc_reg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) mutex_lock(&bmc->dyn_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) goto retry_bmc_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) /* We have a new BMC, set it up. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) bmc = intf->bmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) mutex_lock(&bmc->dyn_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) goto out_noprocessing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) /* Version info changed, scan the channels again. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) __scan_channels(intf, &bmc->fetch_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) if (rv && prev_dyn_id_set) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) rv = 0; /* Ignore failures if we have previous data. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) bmc->dyn_id_set = prev_dyn_id_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) if (!rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) bmc->id = bmc->fetch_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) if (bmc->dyn_guid_set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) bmc->guid = bmc->fetch_guid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) else if (prev_guid_set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) * The guid used to be valid but the fetch failed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) * so just use the cached value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) bmc->dyn_guid_set = prev_guid_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) out_noprocessing:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) if (!rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) if (id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) *id = bmc->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) if (guid_set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) *guid_set = bmc->dyn_guid_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) if (guid && bmc->dyn_guid_set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) *guid = bmc->guid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) mutex_unlock(&bmc->dyn_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) mutex_unlock(&intf->bmc_reg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) kref_put(&intf->refcount, intf_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615)
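/*
 * Like __bmc_get_device_id(), but without pinning a specific
 * interface number.
 */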
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) struct ipmi_device_id *id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) bool *guid_set, guid_t *guid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622)
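/*
 * sysfs attributes of the BMC platform device.  Each show routine
 * fetches current device information (refreshing the cached copy if it
 * has expired) and prints a single value.
 */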
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) static ssize_t device_id_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) struct bmc_device *bmc = to_bmc_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) struct ipmi_device_id id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) return snprintf(buf, 10, "%u\n", id.device_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) static DEVICE_ATTR_RO(device_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) static ssize_t provides_device_sdrs_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) struct bmc_device *bmc = to_bmc_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) struct ipmi_device_id id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) return snprintf(buf, 10, "%u\n", (id.device_revision & 0x80) >> 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) static DEVICE_ATTR_RO(provides_device_sdrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) struct bmc_device *bmc = to_bmc_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) struct ipmi_device_id id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) return snprintf(buf, 20, "%u\n", id.device_revision & 0x0F);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) static DEVICE_ATTR_RO(revision);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) static ssize_t firmware_revision_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) struct bmc_device *bmc = to_bmc_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) struct ipmi_device_id id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) return snprintf(buf, 20, "%u.%x\n", id.firmware_revision_1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) id.firmware_revision_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) static DEVICE_ATTR_RO(firmware_revision);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) static ssize_t ipmi_version_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) struct bmc_device *bmc = to_bmc_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) struct ipmi_device_id id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) return snprintf(buf, 20, "%u.%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) ipmi_version_major(&id),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) ipmi_version_minor(&id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) static DEVICE_ATTR_RO(ipmi_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) static ssize_t add_dev_support_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) struct bmc_device *bmc = to_bmc_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) struct ipmi_device_id id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) return snprintf(buf, 10, "0x%02x\n", id.additional_device_support);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) static ssize_t manufacturer_id_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) struct bmc_device *bmc = to_bmc_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) struct ipmi_device_id id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) return snprintf(buf, 20, "0x%6.6x\n", id.manufacturer_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) static DEVICE_ATTR_RO(manufacturer_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) static ssize_t product_id_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) struct bmc_device *bmc = to_bmc_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) struct ipmi_device_id id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) return snprintf(buf, 10, "0x%4.4x\n", id.product_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) static DEVICE_ATTR_RO(product_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) static ssize_t aux_firmware_rev_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) struct bmc_device *bmc = to_bmc_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) struct ipmi_device_id id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) id.aux_firmware_revision[3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) id.aux_firmware_revision[2],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) id.aux_firmware_revision[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) id.aux_firmware_revision[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) struct bmc_device *bmc = to_bmc_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) bool guid_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) guid_t guid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) if (!guid_set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) return snprintf(buf, UUID_STRING_LEN + 1 + 1, "%pUl\n", &guid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) static DEVICE_ATTR_RO(guid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) static struct attribute *bmc_dev_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) &dev_attr_device_id.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) &dev_attr_provides_device_sdrs.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) &dev_attr_revision.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) &dev_attr_firmware_revision.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) &dev_attr_ipmi_version.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) &dev_attr_additional_device_support.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) &dev_attr_manufacturer_id.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) &dev_attr_product_id.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) &dev_attr_aux_firmware_revision.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) &dev_attr_guid.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805)
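/*
 * Hide the aux_firmware_revision and guid attributes when the BMC does
 * not provide that information.
 */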
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) struct attribute *attr, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) struct device *dev = kobj_to_dev(kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) struct bmc_device *bmc = to_bmc_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) umode_t mode = attr->mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) if (attr == &dev_attr_aux_firmware_revision.attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) struct ipmi_device_id id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) return (!rv && id.aux_firmware_revision_set) ? mode : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) if (attr == &dev_attr_guid.attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) bool guid_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) return (!rv && guid_set) ? mode : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) return mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) static const struct attribute_group bmc_dev_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) .attrs = bmc_dev_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) .is_visible = bmc_dev_attr_is_visible,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) static const struct attribute_group *bmc_dev_attr_groups[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) &bmc_dev_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) static const struct device_type bmc_device_type = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) .groups = bmc_dev_attr_groups,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842)
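/*
 * driver_find_device() match function: matches a BMC device by GUID
 * and takes a usecount reference on a successful match.
 */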
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) static int __find_bmc_guid(struct device *dev, const void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) const guid_t *guid = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) struct bmc_device *bmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) if (dev->type != &bmc_device_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) bmc = to_bmc_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) rv = kref_get_unless_zero(&bmc->usecount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) * Returns the matching bmc_device with its usecount incremented, or NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) guid_t *guid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) struct bmc_device *bmc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) if (dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) bmc = to_bmc_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) return bmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875)
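/* Lookup key for matching a BMC by product id and device id. */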
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) struct prod_dev_id {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) unsigned int product_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) unsigned char device_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880)
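/*
 * driver_find_device() match function: matches a BMC device by product
 * id and device id and takes a usecount reference on a successful match.
 */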
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) static int __find_bmc_prod_dev_id(struct device *dev, const void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) const struct prod_dev_id *cid = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) struct bmc_device *bmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) if (dev->type != &bmc_device_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) bmc = to_bmc_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) rv = (bmc->id.product_id == cid->product_id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) && bmc->id.device_id == cid->device_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) rv = kref_get_unless_zero(&bmc->usecount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) * Returns the matching bmc_device with its usecount incremented, or NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) static struct bmc_device *ipmi_find_bmc_prod_dev_id(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) struct device_driver *drv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) unsigned int product_id, unsigned char device_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) struct prod_dev_id id = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) .product_id = product_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) .device_id = device_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) struct bmc_device *bmc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) if (dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) bmc = to_bmc_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) return bmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) static DEFINE_IDA(ipmi_bmc_ida);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921)
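/* Release callback for the BMC platform device; frees the bmc_device. */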
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) release_bmc_device(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) kfree(to_bmc_device(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927)
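/*
 * Work function that unregisters the BMC platform device and returns
 * its id to ipmi_bmc_ida; queued from cleanup_bmc_device().
 */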
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) static void cleanup_bmc_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) struct bmc_device *bmc = container_of(work, struct bmc_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) remove_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) int id = bmc->pdev.id; /* Unregister overwrites id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) platform_device_unregister(&bmc->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) ida_simple_remove(&ipmi_bmc_ida, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) cleanup_bmc_device(struct kref *ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) * Remove the platform device in a work queue to avoid issues
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) * with removing the device attributes while reading a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) * attribute.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) queue_work(remove_work_wq, &bmc->remove_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) * Must be called with intf->bmc_reg_mutex held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) struct bmc_device *bmc = intf->bmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) if (!intf->bmc_registered)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) sysfs_remove_link(&intf->si_dev->kobj, "bmc");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) kfree(intf->my_dev_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) intf->my_dev_name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) mutex_lock(&bmc->dyn_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) list_del(&intf->bmc_link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) mutex_unlock(&bmc->dyn_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) intf->bmc = &intf->tmp_bmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) kref_put(&bmc->usecount, cleanup_bmc_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) intf->bmc_registered = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) static void ipmi_bmc_unregister(struct ipmi_smi *intf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) mutex_lock(&intf->bmc_reg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) __ipmi_bmc_unregister(intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) mutex_unlock(&intf->bmc_reg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) * Must be called with intf->bmc_reg_mutex held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) static int __ipmi_bmc_register(struct ipmi_smi *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) struct ipmi_device_id *id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) bool guid_set, guid_t *guid, int intf_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) struct bmc_device *bmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) struct bmc_device *old_bmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) * platform_device_register() can cause bmc_reg_mutex to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) * be claimed because of the is_visible functions of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) * the attributes, so release the lock here to avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) * recursive locking.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) intf->in_bmc_register = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) mutex_unlock(&intf->bmc_reg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) * Try to find an existing bmc_device struct that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) * already represents the interfaced BMC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) mutex_lock(&ipmidriver_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) if (guid_set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) id->product_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) id->device_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) * If there is already a bmc_device for this BMC, use it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) * otherwise allocate and register a new BMC device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) if (old_bmc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) bmc = old_bmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) * Note: old_bmc already has usecount incremented by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) * the BMC find functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) intf->bmc = old_bmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) mutex_lock(&bmc->dyn_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) list_add_tail(&intf->bmc_link, &bmc->intfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) mutex_unlock(&bmc->dyn_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) dev_info(intf->si_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) bmc->id.manufacturer_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) bmc->id.product_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) bmc->id.device_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) bmc = kzalloc(sizeof(*bmc), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) if (!bmc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) rv = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) INIT_LIST_HEAD(&bmc->intfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) mutex_init(&bmc->dyn_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) INIT_WORK(&bmc->remove_work, cleanup_bmc_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) bmc->id = *id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) bmc->dyn_id_set = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) bmc->dyn_guid_set = guid_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) bmc->guid = *guid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) bmc->pdev.name = "ipmi_bmc";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) if (rv < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) kfree(bmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) bmc->pdev.dev.driver = &ipmidriver.driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) bmc->pdev.id = rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) bmc->pdev.dev.release = release_bmc_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) bmc->pdev.dev.type = &bmc_device_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) kref_init(&bmc->usecount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) intf->bmc = bmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) mutex_lock(&bmc->dyn_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) list_add_tail(&intf->bmc_link, &bmc->intfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) mutex_unlock(&bmc->dyn_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) rv = platform_device_register(&bmc->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) if (rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) dev_err(intf->si_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) "Unable to register bmc device: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) rv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) goto out_list_del;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) dev_info(intf->si_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) bmc->id.manufacturer_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) bmc->id.product_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) bmc->id.device_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) * create symlink from system interface device to bmc device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) * and back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) if (rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) goto out_put_bmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) if (intf_num == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) intf_num = intf->intf_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) if (!intf->my_dev_name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) rv = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) rv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) goto out_unlink1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) intf->my_dev_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) if (rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) rv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) goto out_free_my_dev_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) intf->bmc_registered = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) mutex_unlock(&ipmidriver_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) mutex_lock(&intf->bmc_reg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) intf->in_bmc_register = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) out_free_my_dev_name:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) kfree(intf->my_dev_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) intf->my_dev_name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) out_unlink1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) sysfs_remove_link(&intf->si_dev->kobj, "bmc");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) out_put_bmc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) mutex_lock(&bmc->dyn_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) list_del(&intf->bmc_link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) mutex_unlock(&bmc->dyn_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) intf->bmc = &intf->tmp_bmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) kref_put(&bmc->usecount, cleanup_bmc_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) out_list_del:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) mutex_lock(&bmc->dyn_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) list_del(&intf->bmc_link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) mutex_unlock(&bmc->dyn_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) intf->bmc = &intf->tmp_bmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) put_device(&bmc->pdev.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143)
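/*
 * Send a Get Device GUID command to the BMC on the system interface.
 * The response is handled by guid_handler() via the null user handler.
 */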
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) send_guid_cmd(struct ipmi_smi *intf, int chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) struct kernel_ipmi_msg msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) struct ipmi_system_interface_addr si;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) si.channel = IPMI_BMC_CHANNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) si.lun = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) msg.netfn = IPMI_NETFN_APP_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) msg.data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) msg.data_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) return i_ipmi_request(NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) (struct ipmi_addr *) &si,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) &msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) intf->addrinfo[0].address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) intf->addrinfo[0].lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) -1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171)
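/*
 * null_user_handler for the Get Device GUID response.  Stores the
 * fetched GUID in bmc->fetch_guid and records whether a valid GUID was
 * received; unrelated messages are ignored.
 */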
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) struct bmc_device *bmc = intf->bmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) /* Not for me */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) if (msg->msg.data[0] != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) /* Error from getting the GUID, the BMC doesn't have one. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) bmc->dyn_guid_set = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) if (msg->msg.data_len < UUID_SIZE + 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) bmc->dyn_guid_set = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) dev_warn(intf->si_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) "The GUID response from the BMC was too short, it was %d but should have been %d. Assuming GUID is not available.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) msg->msg.data_len, UUID_SIZE + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) import_guid(&bmc->fetch_guid, msg->msg.data + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) * Make sure the guid data is available before setting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) * dyn_guid_set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) bmc->dyn_guid_set = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) wake_up(&intf->waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206)
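/*
 * Fetch the GUID from the BMC and wait for the response.  If the fetch
 * fails in any way, the GUID is marked as not available.
 */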
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) static void __get_guid(struct ipmi_smi *intf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) struct bmc_device *bmc = intf->bmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) bmc->dyn_guid_set = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) intf->null_user_handler = guid_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) rv = send_guid_cmd(intf, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) /* Send failed, no GUID available. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) bmc->dyn_guid_set = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) wait_event(intf->waitq, bmc->dyn_guid_set != 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) /* dyn_guid_set makes the guid data available. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) intf->null_user_handler = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226)
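/*
 * Send a Get Channel Info command for the given channel to the BMC;
 * channel_handler() processes the response.
 */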
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) send_channel_info_cmd(struct ipmi_smi *intf, int chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) struct kernel_ipmi_msg msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) unsigned char data[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) struct ipmi_system_interface_addr si;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) si.channel = IPMI_BMC_CHANNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) si.lun = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) msg.netfn = IPMI_NETFN_APP_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) msg.data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) msg.data_len = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) data[0] = chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) return i_ipmi_request(NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) (struct ipmi_addr *) &si,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) &msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) intf->addrinfo[0].address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) intf->addrinfo[0].lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) -1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256)
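/*
 * null_user_handler for Get Channel Info responses.  Records the
 * medium and protocol for the current channel and requests the next
 * one, until all channels have been queried.  If the BMC does not
 * support the command, a single IPMB channel at channel 0 is assumed.
 */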
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) int rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) int ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) unsigned int set = intf->curr_working_cset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) struct ipmi_channel *chans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) /* It's the one we want */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) if (msg->msg.data[0] != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) /* Got an error from the channel, just go on. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) * If the MC does not support this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) * command, that is legal. We just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) * assume it has one IPMB at channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) * zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) intf->wchannels[set].c[0].medium
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) = IPMI_CHANNEL_MEDIUM_IPMB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) intf->wchannels[set].c[0].protocol
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) = IPMI_CHANNEL_PROTOCOL_IPMB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) intf->channel_list = intf->wchannels + set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) intf->channels_ready = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) wake_up(&intf->waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) goto next_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) if (msg->msg.data_len < 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) /* Message not big enough, just go on. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) goto next_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) }
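/*
 * Get Channel Info response layout as consumed below:
 *   data[0] - completion code (already checked above)
 *   data[1] - the channel number (not used here)
 *   data[2] - bits 6:0: channel medium type
 *   data[3] - bits 4:0: channel protocol type
 */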
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) ch = intf->curr_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) chans = intf->wchannels[set].c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) chans[ch].medium = msg->msg.data[2] & 0x7f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) chans[ch].protocol = msg->msg.data[3] & 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) next_channel:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) intf->curr_channel++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) if (intf->curr_channel >= IPMI_MAX_CHANNELS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) intf->channel_list = intf->wchannels + set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) intf->channels_ready = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) wake_up(&intf->waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) intf->channel_list = intf->wchannels + set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) intf->channels_ready = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) rv = send_channel_info_cmd(intf, intf->curr_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) if (rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) /* Got an error somehow, just give up. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) dev_warn(intf->si_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) "Error sending channel information for channel %d: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) intf->curr_channel, rv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) intf->channel_list = intf->wchannels + set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) intf->channels_ready = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) wake_up(&intf->waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) * Must be holding intf->bmc_reg_mutex to call this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) if (ipmi_version_major(id) > 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) || (ipmi_version_major(id) == 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) && ipmi_version_minor(id) >= 5)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) unsigned int set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) * Start scanning the channels to see what is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) * available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) set = !intf->curr_working_cset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) intf->curr_working_cset = set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) memset(&intf->wchannels[set], 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) sizeof(struct ipmi_channel_set));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) intf->null_user_handler = channel_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) intf->curr_channel = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) rv = send_channel_info_cmd(intf, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) if (rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) dev_warn(intf->si_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) "Error sending channel information for channel 0, %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) rv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) intf->null_user_handler = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) /* Wait for the channel info to be read. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) wait_event(intf->waitq, intf->channels_ready);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) intf->null_user_handler = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) unsigned int set = intf->curr_working_cset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) /* Assume a single IPMB channel at zero. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) intf->channel_list = intf->wchannels + set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) intf->channels_ready = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) }
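/*
 * Channel data is double buffered in intf->wchannels: a rescan flips
 * curr_working_cset, zeroes the newly selected ipmi_channel_set, and
 * channel_handler() fills it in one Get Channel Info response at a
 * time, pointing intf->channel_list at the working set and raising
 * intf->channels_ready as results arrive.  The wait in
 * __scan_channels() only needs channels_ready; it does not require
 * every channel to have been probed.
 */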
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) static void ipmi_poll(struct ipmi_smi *intf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) if (intf->handlers->poll)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) intf->handlers->poll(intf->send_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) /* In case something came in */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) handle_new_recv_msgs(intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) void ipmi_poll_interface(struct ipmi_user *user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) ipmi_poll(user->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) EXPORT_SYMBOL(ipmi_poll_interface);
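/*
 * A minimal usage sketch (the user pointer is assumed to come from
 * ipmi_create_user()); callers such as panic or watchdog paths that
 * cannot rely on interrupts spin on this to drive message delivery:
 *
 *	do {
 *		ipmi_poll_interface(user);
 *	} while (!response_received);
 *
 * response_received is a placeholder for whatever completion condition
 * the caller is waiting on.
 */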
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) static void redo_bmc_reg(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) bmc_reg_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) if (!intf->in_shutdown)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) bmc_get_device_id(intf, NULL, NULL, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) kref_put(&intf->refcount, intf_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) }
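/*
 * redo_bmc_reg() drops a reference when it finishes, so whoever queues
 * bmc_reg_work is expected to take a reference on the interface first;
 * otherwise the interface could be freed while the work is pending.
 */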
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) int ipmi_add_smi(struct module *owner,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) const struct ipmi_smi_handlers *handlers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) void *send_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) struct device *si_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) unsigned char slave_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) struct ipmi_smi *intf, *tintf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) struct list_head *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) struct ipmi_device_id id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) * Make sure the driver is actually initialized, this handles
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) * problems with initialization order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) rv = ipmi_init_msghandler();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) intf = kzalloc(sizeof(*intf), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) if (!intf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) rv = init_srcu_struct(&intf->users_srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) if (rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) kfree(intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) intf->owner = owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) intf->bmc = &intf->tmp_bmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) INIT_LIST_HEAD(&intf->bmc->intfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) mutex_init(&intf->bmc->dyn_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) INIT_LIST_HEAD(&intf->bmc_link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) mutex_init(&intf->bmc_reg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) intf->intf_num = -1; /* Mark it invalid for now. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) kref_init(&intf->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) intf->si_dev = si_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) intf->addrinfo[j].lun = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) if (slave_addr != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) intf->addrinfo[0].address = slave_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) INIT_LIST_HEAD(&intf->users);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) intf->handlers = handlers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) intf->send_info = send_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) spin_lock_init(&intf->seq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) intf->seq_table[j].inuse = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) intf->seq_table[j].seqid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) intf->curr_seq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) spin_lock_init(&intf->waiting_rcv_msgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) tasklet_setup(&intf->recv_tasklet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) smi_recv_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) spin_lock_init(&intf->xmit_msgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) INIT_LIST_HEAD(&intf->xmit_msgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) INIT_LIST_HEAD(&intf->hp_xmit_msgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) spin_lock_init(&intf->events_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) spin_lock_init(&intf->watch_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) atomic_set(&intf->event_waiters, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) INIT_LIST_HEAD(&intf->waiting_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) intf->waiting_events_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) mutex_init(&intf->cmd_rcvrs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) spin_lock_init(&intf->maintenance_mode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) INIT_LIST_HEAD(&intf->cmd_rcvrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) init_waitqueue_head(&intf->waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) for (i = 0; i < IPMI_NUM_STATS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) atomic_set(&intf->stats[i], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) mutex_lock(&ipmi_interfaces_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) /* Look for a hole in the numbers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) link = &ipmi_interfaces;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) list_for_each_entry_rcu(tintf, &ipmi_interfaces, link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) ipmi_interfaces_mutex_held()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) if (tintf->intf_num != i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) link = &tintf->link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) /* Add the new interface in numeric order. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) if (i == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) list_add_rcu(&intf->link, &ipmi_interfaces);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) list_add_tail_rcu(&intf->link, link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) rv = handlers->start_processing(send_info, intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) if (rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) dev_err(si_dev, "Unable to get the device id: %d\n", rv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) goto out_err_started;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) mutex_lock(&intf->bmc_reg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) rv = __scan_channels(intf, &id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) mutex_unlock(&intf->bmc_reg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) goto out_err_bmc_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) * Keep memory order straight for RCU readers. Make
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) * sure everything else is committed to memory before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) * setting intf_num to mark the interface valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) intf->intf_num = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) mutex_unlock(&ipmi_interfaces_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) /* After this point the interface is legal to use. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) call_smi_watchers(i, intf->si_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) out_err_bmc_reg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) ipmi_bmc_unregister(intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) out_err_started:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) if (intf->handlers->shutdown)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) intf->handlers->shutdown(intf->send_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) list_del_rcu(&intf->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) mutex_unlock(&ipmi_interfaces_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) synchronize_srcu(&ipmi_interfaces_srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) cleanup_srcu_struct(&intf->users_srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) kref_put(&intf->refcount, intf_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) EXPORT_SYMBOL(ipmi_add_smi);
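/*
 * Registration sketch for a lower-layer driver, shown with placeholder
 * my_* names.  Only the handlers this file is seen to call are listed;
 * a real driver fills in the rest of struct ipmi_smi_handlers:
 *
 *	static const struct ipmi_smi_handlers my_handlers = {
 *		.start_processing = my_start_processing,
 *		.shutdown         = my_shutdown,
 *		.poll             = my_poll,
 *	};
 *
 *	rv = ipmi_add_smi(THIS_MODULE, &my_handlers, my_info, my_dev, 0);
 *
 * Passing 0 for slave_addr keeps the IPMI_BMC_SLAVE_ADDR default set
 * in the loop above.
 */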
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) static void deliver_smi_err_response(struct ipmi_smi *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) struct ipmi_smi_msg *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) unsigned char err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) {
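/*
 * Fake a response in place: data[0] holds (netfn << 2) | LUN of the
 * original request, and ORing in 4 turns the even request netfn into
 * the matching odd response netfn while keeping the LUN.  rsp[1]
 * echoes the command and rsp[2] carries the completion code.
 */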
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) msg->rsp[0] = msg->data[0] | 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) msg->rsp[1] = msg->data[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) msg->rsp[2] = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) msg->rsp_size = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) /* It's an error, so it will never requeue, no need to check return. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) handle_one_recv_msg(intf, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) static void cleanup_smi_msgs(struct ipmi_smi *intf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) struct seq_table *ent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) struct ipmi_smi_msg *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) struct list_head *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) struct list_head tmplist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) /* Clear out our transmit queues and hold the messages. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) INIT_LIST_HEAD(&tmplist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) list_splice_tail(&intf->xmit_msgs, &tmplist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) /* Current message first, to preserve order */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) /* Wait for the message to clear out. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) schedule_timeout(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) /* No need for locks, the interface is down. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) * Return errors for all pending messages in queue and in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) * tables waiting for remote responses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) while (!list_empty(&tmplist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) entry = tmplist.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) list_del(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) msg = list_entry(entry, struct ipmi_smi_msg, link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) ent = &intf->seq_table[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) if (!ent->inuse)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) }
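/*
 * cleanup_smi_msgs() runs after the lower layer has been shut down, so
 * every queued transmit and every sequence-table entry still waiting
 * on a remote response is completed locally with IPMI_ERR_UNSPECIFIED
 * rather than being leaked.
 */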
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) void ipmi_unregister_smi(struct ipmi_smi *intf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) struct ipmi_smi_watcher *w;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) int intf_num = intf->intf_num, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) mutex_lock(&ipmi_interfaces_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) intf->intf_num = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) intf->in_shutdown = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) list_del_rcu(&intf->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) mutex_unlock(&ipmi_interfaces_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) synchronize_srcu(&ipmi_interfaces_srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) /* At this point no users can be added to the interface. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) * Call all the watcher interfaces to tell them that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) * an interface is going away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) mutex_lock(&smi_watchers_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) list_for_each_entry(w, &smi_watchers, link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) w->smi_gone(intf_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) mutex_unlock(&smi_watchers_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) index = srcu_read_lock(&intf->users_srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) while (!list_empty(&intf->users)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) struct ipmi_user *user =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) container_of(list_next_rcu(&intf->users),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) struct ipmi_user, link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) _ipmi_destroy_user(user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) srcu_read_unlock(&intf->users_srcu, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) if (intf->handlers->shutdown)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) intf->handlers->shutdown(intf->send_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) cleanup_smi_msgs(intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) ipmi_bmc_unregister(intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) cleanup_srcu_struct(&intf->users_srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) kref_put(&intf->refcount, intf_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) EXPORT_SYMBOL(ipmi_unregister_smi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) struct ipmi_smi_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) struct ipmi_ipmb_addr ipmb_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) struct ipmi_recv_msg *recv_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) * This is 11, not 10, because the response must contain a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) * completion code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) if (msg->rsp_size < 11) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) /* Message not big enough, just ignore it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) ipmi_inc_stat(intf, invalid_ipmb_responses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) if (msg->rsp[2] != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) /* An error getting the response, just ignore it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) }
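/*
 * Get Message response layout as parsed below (offsets into rsp):
 *   [3] bits 3:0 - channel the response arrived on
 *   [4] embedded netfn << 2 (the response bit is masked off for the
 *       sequence-table lookup)
 *   [6] remote (responder) slave address
 *   [7] bits 7:2 - sequence number, bits 1:0 - LUN
 *   [8] command
 *   [9] first data byte (the remote completion code)
 * The trailing IPMB checksum is dropped when the data is copied out.
 */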
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) ipmb_addr.slave_addr = msg->rsp[6];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) ipmb_addr.channel = msg->rsp[3] & 0x0f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) ipmb_addr.lun = msg->rsp[7] & 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) * It's a response from a remote entity. Look up the sequence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) * number and handle the response.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) if (intf_find_seq(intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) msg->rsp[7] >> 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) msg->rsp[3] & 0x0f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) msg->rsp[8],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) (msg->rsp[4] >> 2) & (~1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) (struct ipmi_addr *) &ipmb_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) &recv_msg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) * We were unable to find the sequence number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) * so just nuke the message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) ipmi_inc_stat(intf, unhandled_ipmb_responses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) * The other fields matched, so no need to set them, except
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) * for netfn, which needs to be the response that was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) * returned, not the request value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) recv_msg->msg.netfn = msg->rsp[4] >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) recv_msg->msg.data = recv_msg->msg_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) recv_msg->msg.data_len = msg->rsp_size - 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) if (deliver_response(intf, recv_msg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) ipmi_inc_stat(intf, unhandled_ipmb_responses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) ipmi_inc_stat(intf, handled_ipmb_responses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) struct ipmi_smi_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) struct cmd_rcvr *rcvr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) int rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) unsigned char netfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) unsigned char cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) unsigned char chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) struct ipmi_user *user = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) struct ipmi_ipmb_addr *ipmb_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) struct ipmi_recv_msg *recv_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) if (msg->rsp_size < 10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) /* Message not big enough, just ignore it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) ipmi_inc_stat(intf, invalid_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) if (msg->rsp[2] != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) /* An error getting the response, just ignore it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) netfn = msg->rsp[4] >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) cmd = msg->rsp[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) chan = msg->rsp[3] & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) if (rcvr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) user = rcvr->user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) kref_get(&user->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) user = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) if (user == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) /* We didn't find a user, deliver an error response. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) ipmi_inc_stat(intf, unhandled_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738)
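/*
 * Nobody has claimed this netfn/cmd, so build a Send Message request
 * that returns an "invalid command" completion code to the IPMB
 * requester.  The header fields (channel, addresses, rqSeq) are reused
 * from the incoming message, and the reply body carries the original
 * command, an IPMI_INVALID_CMD_COMPLETION_CODE byte, and the closing
 * IPMB checksum.
 */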
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) msg->data[1] = IPMI_SEND_MSG_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) msg->data[2] = msg->rsp[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) msg->data[3] = msg->rsp[6];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) msg->data[5] = ipmb_checksum(&msg->data[3], 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) /* rqseq/lun */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) msg->data[8] = msg->rsp[8]; /* cmd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) msg->data[10] = ipmb_checksum(&msg->data[6], 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) msg->data_size = 11;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) pr_debug("Invalid command: %*ph\n", msg->data_size, msg->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) if (!intf->in_shutdown) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) smi_send(intf, intf->handlers, msg, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) * We used the message, so return the value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) * that causes it to not be freed or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) * queued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) rv = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) recv_msg = ipmi_alloc_recv_msg();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) if (!recv_msg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) * We couldn't allocate memory for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) * message, so requeue it for handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) * later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) rv = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) kref_put(&user->refcount, free_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) /* Extract the source address from the data. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) ipmb_addr->slave_addr = msg->rsp[6];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) ipmb_addr->lun = msg->rsp[7] & 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) ipmb_addr->channel = msg->rsp[3] & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) * Extract the rest of the message information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) * from the IPMB header.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) recv_msg->user = user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) recv_msg->msgid = msg->rsp[7] >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) recv_msg->msg.netfn = msg->rsp[4] >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) recv_msg->msg.cmd = msg->rsp[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) recv_msg->msg.data = recv_msg->msg_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) * We chop off 10, not 9 bytes because the checksum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) * at the end also needs to be removed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) recv_msg->msg.data_len = msg->rsp_size - 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) memcpy(recv_msg->msg_data, &msg->rsp[9],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) msg->rsp_size - 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) if (deliver_response(intf, recv_msg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) ipmi_inc_stat(intf, unhandled_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) ipmi_inc_stat(intf, handled_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) static int handle_lan_get_msg_rsp(struct ipmi_smi *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) struct ipmi_smi_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) struct ipmi_lan_addr lan_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) struct ipmi_recv_msg *recv_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) * This is 13, not 12, because the response must contain a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) * completion code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) if (msg->rsp_size < 13) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) /* Message not big enough, just ignore it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) ipmi_inc_stat(intf, invalid_lan_responses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) if (msg->rsp[2] != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) /* An error getting the response, just ignore it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) }
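/*
 * LAN Get Message response layout as parsed below (offsets into rsp):
 *   [3] privilege (high nibble) / channel (low nibble)
 *   [4] session handle
 *   [5] local SWID
 *   [6] embedded netfn << 2
 *   [8] remote SWID
 *   [9] bits 7:2 - sequence number, bits 1:0 - LUN
 *   [10] command
 *   [11] first data byte
 * The trailing checksum is dropped when the data is copied out.
 */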
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) lan_addr.session_handle = msg->rsp[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) lan_addr.remote_SWID = msg->rsp[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) lan_addr.local_SWID = msg->rsp[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) lan_addr.channel = msg->rsp[3] & 0x0f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) lan_addr.privilege = msg->rsp[3] >> 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) lan_addr.lun = msg->rsp[9] & 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) * It's a response from a remote entity. Look up the sequence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) * number and handle the response.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) if (intf_find_seq(intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) msg->rsp[9] >> 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) msg->rsp[3] & 0x0f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) msg->rsp[10],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) (msg->rsp[6] >> 2) & (~1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) (struct ipmi_addr *) &lan_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) &recv_msg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) * We were unable to find the sequence number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) * so just nuke the message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) ipmi_inc_stat(intf, unhandled_lan_responses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) * The other fields matched, so no need to set them, except
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) * for netfn, which needs to be the response that was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) * returned, not the request value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) recv_msg->msg.netfn = msg->rsp[6] >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) recv_msg->msg.data = recv_msg->msg_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) recv_msg->msg.data_len = msg->rsp_size - 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) if (deliver_response(intf, recv_msg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) ipmi_inc_stat(intf, unhandled_lan_responses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) ipmi_inc_stat(intf, handled_lan_responses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) struct ipmi_smi_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) struct cmd_rcvr *rcvr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) int rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) unsigned char netfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) unsigned char cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) unsigned char chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) struct ipmi_user *user = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) struct ipmi_lan_addr *lan_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) struct ipmi_recv_msg *recv_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) if (msg->rsp_size < 12) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) /* Message not big enough, just ignore it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) ipmi_inc_stat(intf, invalid_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) if (msg->rsp[2] != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) /* An error getting the response, just ignore it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) netfn = msg->rsp[6] >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) cmd = msg->rsp[10];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) chan = msg->rsp[3] & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) if (rcvr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) user = rcvr->user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) kref_get(&user->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) user = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) if (user == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) /* We didn't find a user, just give up. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) ipmi_inc_stat(intf, unhandled_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) * Don't do anything with these messages, just allow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) * them to be freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) recv_msg = ipmi_alloc_recv_msg();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) if (!recv_msg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) * We couldn't allocate memory for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) * message, so requeue it for handling later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) rv = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) kref_put(&user->refcount, free_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) /* Extract the source address from the data. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) lan_addr->session_handle = msg->rsp[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) lan_addr->remote_SWID = msg->rsp[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) lan_addr->local_SWID = msg->rsp[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) lan_addr->lun = msg->rsp[9] & 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) lan_addr->channel = msg->rsp[3] & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) lan_addr->privilege = msg->rsp[3] >> 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) * Extract the rest of the message information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) * from the IPMB header.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) recv_msg->user = user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) recv_msg->msgid = msg->rsp[9] >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) recv_msg->msg.netfn = msg->rsp[6] >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) recv_msg->msg.cmd = msg->rsp[10];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) recv_msg->msg.data = recv_msg->msg_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) * We chop off 12, not 11 bytes because the checksum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) * at the end also needs to be removed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) recv_msg->msg.data_len = msg->rsp_size - 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) memcpy(recv_msg->msg_data, &msg->rsp[11],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) msg->rsp_size - 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) if (deliver_response(intf, recv_msg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) ipmi_inc_stat(intf, unhandled_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) ipmi_inc_stat(intf, handled_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) }
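/*
 * Note the asymmetry with handle_ipmb_get_msg_cmd(): a LAN command
 * with no registered receiver is only counted and freed (rv stays 0);
 * no error completion code is sent back to the originator.
 */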
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) * This routine handles "Get Message" command responses for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) * channels that use an OEM Medium. The message format belongs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) * the OEM. See IPMI 2.0 specification, Chapter 6 and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) * Chapter 22, sections 22.6 and 22.24 for more details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) struct ipmi_smi_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) struct cmd_rcvr *rcvr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) int rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) unsigned char netfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) unsigned char cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) unsigned char chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) struct ipmi_user *user = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) struct ipmi_system_interface_addr *smi_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) struct ipmi_recv_msg *recv_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) * We expect the OEM SW to perform error checking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) * so we just do some basic sanity checks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) if (msg->rsp_size < 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) /* Message not big enough, just ignore it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) ipmi_inc_stat(intf, invalid_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) if (msg->rsp[2] != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) /* An error getting the response, just ignore it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) * This is an OEM Message so the OEM needs to know how
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) * to handle the message. We do no interpretation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) netfn = msg->rsp[0] >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) cmd = msg->rsp[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) chan = msg->rsp[3] & 0xf;
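/*
 * For OEM channels the payload is not re-framed: rsp[0] and rsp[1] are
 * the netfn/cmd of the response carrying the message, rsp[3] holds the
 * channel, and everything from rsp[4] onward is handed to the
 * registered user untouched.
 */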
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) if (rcvr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) user = rcvr->user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) kref_get(&user->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) user = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) if (user == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) /* We didn't find a user, just give up. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) ipmi_inc_stat(intf, unhandled_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) * Don't do anything with these messages, just allow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) * them to be freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) recv_msg = ipmi_alloc_recv_msg();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) if (!recv_msg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) * We couldn't allocate memory for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) * message, so requeue it for handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) * later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) rv = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) kref_put(&user->refcount, free_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) * OEM Messages are expected to be delivered via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) * the system interface to SMS software. We might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) * need to revisit this depending on OEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) * requirements.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) smi_addr = ((struct ipmi_system_interface_addr *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) &recv_msg->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) smi_addr->channel = IPMI_BMC_CHANNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) smi_addr->lun = msg->rsp[0] & 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) recv_msg->user = user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) recv_msg->user_msg_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) recv_msg->msg.netfn = msg->rsp[0] >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) recv_msg->msg.cmd = msg->rsp[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) recv_msg->msg.data = recv_msg->msg_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) * The message starts at byte 4, which follows the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) * Channel Byte in the "GET MESSAGE" response.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) recv_msg->msg.data_len = msg->rsp_size - 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) memcpy(recv_msg->msg_data, &msg->rsp[4],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) msg->rsp_size - 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) if (deliver_response(intf, recv_msg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) ipmi_inc_stat(intf, unhandled_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) ipmi_inc_stat(intf, handled_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) struct ipmi_smi_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) struct ipmi_system_interface_addr *smi_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) recv_msg->msgid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) smi_addr->channel = IPMI_BMC_CHANNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) smi_addr->lun = msg->rsp[0] & 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) recv_msg->msg.netfn = msg->rsp[0] >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) recv_msg->msg.cmd = msg->rsp[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) recv_msg->msg.data = recv_msg->msg_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) recv_msg->msg.data_len = msg->rsp_size - 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096)
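/*
 * Handle the response to a Read Event Message Buffer command.  The
 * event is copied to every user that has asked for events; if nobody
 * is currently listening, it is queued on intf->waiting_events
 * (bounded by MAX_EVENTS_IN_QUEUE) so it can be delivered when a user
 * later turns event reception on.
 */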
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) static int handle_read_event_rsp(struct ipmi_smi *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) struct ipmi_smi_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) struct ipmi_recv_msg *recv_msg, *recv_msg2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) struct list_head msgs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) struct ipmi_user *user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) int rv = 0, deliver_count = 0, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105)
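	/*
	 * A valid Read Event Message Buffer response is at least 19
	 * bytes: netfn/LUN + cmd + completion code + the 16-byte event
	 * message that is copied out below.
	 */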
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) if (msg->rsp_size < 19) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) /* Message is too small to be an IPMB event. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) ipmi_inc_stat(intf, invalid_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) if (msg->rsp[2] != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) /* An error getting the event, just ignore it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) INIT_LIST_HEAD(&msgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) spin_lock_irqsave(&intf->events_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) ipmi_inc_stat(intf, events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) * Allocate and fill in one message for every user that is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) * getting events.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) index = srcu_read_lock(&intf->users_srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) list_for_each_entry_rcu(user, &intf->users, link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) if (!user->gets_events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) recv_msg = ipmi_alloc_recv_msg();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) if (!recv_msg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) list_del(&recv_msg->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) ipmi_free_recv_msg(recv_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) * We couldn't allocate memory for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) * message, so requeue it for handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) * later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) rv = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) deliver_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) copy_event_into_recv_msg(recv_msg, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) recv_msg->user = user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) kref_get(&user->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) list_add_tail(&recv_msg->link, &msgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) srcu_read_unlock(&intf->users_srcu, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) if (deliver_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) /* Now deliver all the messages. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) list_del(&recv_msg->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) deliver_local_response(intf, recv_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
		/*
		 * No one is waiting for the message; put it in the queue
		 * if the queue is not already full.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) recv_msg = ipmi_alloc_recv_msg();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) if (!recv_msg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) * We couldn't allocate memory for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) * message, so requeue it for handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) * later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) rv = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) copy_event_into_recv_msg(recv_msg, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) list_add_tail(&recv_msg->link, &intf->waiting_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) intf->waiting_events_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) } else if (!intf->event_msg_printed) {
		/*
		 * There are too many events in the queue, discard this
		 * message.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) dev_warn(intf->si_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) "Event queue full, discarding incoming events\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) intf->event_msg_printed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) spin_unlock_irqrestore(&intf->events_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) static int handle_bmc_rsp(struct ipmi_smi *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) struct ipmi_smi_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) struct ipmi_recv_msg *recv_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) struct ipmi_system_interface_addr *smi_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) recv_msg = (struct ipmi_recv_msg *) msg->user_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) if (recv_msg == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) dev_warn(intf->si_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) recv_msg->msgid = msg->msgid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) smi_addr = ((struct ipmi_system_interface_addr *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) &recv_msg->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) smi_addr->channel = IPMI_BMC_CHANNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) smi_addr->lun = msg->rsp[0] & 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) recv_msg->msg.netfn = msg->rsp[0] >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) recv_msg->msg.cmd = msg->rsp[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) recv_msg->msg.data = recv_msg->msg_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) recv_msg->msg.data_len = msg->rsp_size - 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) deliver_local_response(intf, recv_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) * Handle a received message. Return 1 if the message should be requeued,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) * 0 if the message should be freed, or -1 if the message should not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) * be freed or requeued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) */
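/*
 * (handle_new_recv_msgs() acts on this return value: 1 puts the
 * message back on the head of the receive queue, 0 frees it, and a
 * negative value removes it from the queue without freeing it.)
 */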
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) static int handle_one_recv_msg(struct ipmi_smi *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) struct ipmi_smi_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) int requeue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) int chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) pr_debug("Recv: %*ph\n", msg->rsp_size, msg->rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) if ((msg->data_size >= 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) && (msg->data[1] == IPMI_SEND_MSG_CMD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) && (msg->user_data == NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) if (intf->in_shutdown)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) goto free_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249)
		/*
		 * This is the local response to a command send; start
		 * the timer for it.  user_data will not be NULL if this
		 * is a response send, and we let response sends just go
		 * through.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) * Check for errors, if we get certain errors (ones
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) * that mean basically we can try again later), we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) * ignore them and start the timer. Otherwise we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) * report the error immediately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) && (msg->rsp[2] != IPMI_BUS_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) int ch = msg->rsp[3] & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) struct ipmi_channel *chans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) /* Got an error sending the message, handle it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) chans = READ_ONCE(intf->channel_list)->c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) ipmi_inc_stat(intf, sent_lan_command_errs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) ipmi_inc_stat(intf, sent_ipmb_command_errs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) intf_err_seq(intf, msg->msgid, msg->rsp[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) /* The message was sent, start the timer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) intf_start_seq_timer(intf, msg->msgid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) free_msg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) requeue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) } else if (msg->rsp_size < 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) /* Message is too small to be correct. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) dev_warn(intf->si_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) /* Generate an error response for the message. */
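		/*
		 * data[0] is the request netfn/LUN; setting bit 2 (the
		 * low bit of the netfn) converts it to the matching
		 * response netfn.
		 */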
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) msg->rsp[0] = msg->data[0] | (1 << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) msg->rsp[1] = msg->data[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) msg->rsp_size = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) || (msg->rsp[1] != msg->data[1])) {
		/*
		 * The NetFN and Command in the response are not even
		 * marginally correct.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) dev_warn(intf->si_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) (msg->data[0] >> 2) | 1, msg->data[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) msg->rsp[0] >> 2, msg->rsp[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) /* Generate an error response for the message. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) msg->rsp[0] = msg->data[0] | (1 << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) msg->rsp[1] = msg->data[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) msg->rsp_size = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) && (msg->user_data != NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) * It's a response to a response we sent. For this we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) * deliver a send message response to the user.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) struct ipmi_recv_msg *recv_msg = msg->user_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) requeue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) if (msg->rsp_size < 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) /* Message is too small to be correct. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) chan = msg->data[2] & 0x0f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) if (chan >= IPMI_MAX_CHANNELS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) /* Invalid channel number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) if (!recv_msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) recv_msg->msg.data = recv_msg->msg_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) recv_msg->msg.data_len = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) recv_msg->msg_data[0] = msg->rsp[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) deliver_local_response(intf, recv_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) struct ipmi_channel *chans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) /* It's from the receive queue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) chan = msg->rsp[3] & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) if (chan >= IPMI_MAX_CHANNELS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) /* Invalid channel number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) requeue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354)
		/*
		 * Make sure the channels have been initialized;
		 * channels_ready is set once all the channels for this
		 * interface have been initialized.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) if (!intf->channels_ready) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) requeue = 0; /* Throw the message away */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) chans = READ_ONCE(intf->channel_list)->c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) switch (chans[chan].medium) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) case IPMI_CHANNEL_MEDIUM_IPMB:
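			/*
			 * rsp[4] is the netfn/LUN byte of the embedded
			 * IPMB message; bit 2 is the low bit of the
			 * netfn, so it is set for responses (odd netfn)
			 * and clear for requests.
			 */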
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) if (msg->rsp[4] & 0x04) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) * It's a response, so find the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) * requesting message and send it up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) requeue = handle_ipmb_get_msg_rsp(intf, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) * It's a command to the SMS from some other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) * entity. Handle that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) requeue = handle_ipmb_get_msg_cmd(intf, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) case IPMI_CHANNEL_MEDIUM_8023LAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) case IPMI_CHANNEL_MEDIUM_ASYNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) if (msg->rsp[6] & 0x04) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) * It's a response, so find the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) * requesting message and send it up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) requeue = handle_lan_get_msg_rsp(intf, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) * It's a command to the SMS from some other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) * entity. Handle that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) requeue = handle_lan_get_msg_cmd(intf, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) default:
			/*
			 * Check for OEM channels.  Clients must register
			 * for these commands or they will be dropped.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) && (chans[chan].medium
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) requeue = handle_oem_get_msg_cmd(intf, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) * We don't handle the channel type, so just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) * free the message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) requeue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) /* It's an asynchronous event. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) requeue = handle_read_event_rsp(intf, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) /* It's a response from the local BMC. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) requeue = handle_bmc_rsp(intf, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) return requeue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) * If there are messages in the queue or pretimeouts, handle them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) */
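/*
 * If the interface is marked run_to_completion (used, e.g., when
 * flushing messages at panic time), the spinlocks are skipped since
 * everything runs single-threaded at that point.
 */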
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) static void handle_new_recv_msgs(struct ipmi_smi *intf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) struct ipmi_smi_msg *smi_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) int run_to_completion = intf->run_to_completion;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) /* See if any waiting messages need to be processed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) if (!run_to_completion)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) while (!list_empty(&intf->waiting_rcv_msgs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) smi_msg = list_entry(intf->waiting_rcv_msgs.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) struct ipmi_smi_msg, link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) list_del(&smi_msg->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) if (!run_to_completion)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) rv = handle_one_recv_msg(intf, smi_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) if (!run_to_completion)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) if (rv > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) * To preserve message order, quit if we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) * can't handle a message. Add the message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) * back at the head, this is safe because this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) * tasklet is the only thing that pulls the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) * messages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) if (rv == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) /* Message handled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) ipmi_free_smi_msg(smi_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) /* If rv < 0, fatal error, del but don't free. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) if (!run_to_completion)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473)
	/*
	 * If the pretimeout count is non-zero, decrement one from it and
	 * deliver pretimeouts to all the users.
	 */
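	/*
	 * atomic_add_unless(..., -1, 0) only decrements a non-zero
	 * count, so the counter can never go negative here.
	 */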
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) struct ipmi_user *user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) index = srcu_read_lock(&intf->users_srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) list_for_each_entry_rcu(user, &intf->users, link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) if (user->handler->ipmi_watchdog_pretimeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) user->handler->ipmi_watchdog_pretimeout(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) user->handler_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) srcu_read_unlock(&intf->users_srcu, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) static void smi_recv_tasklet(struct tasklet_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) unsigned long flags = 0; /* keep us warning-free. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) int run_to_completion = intf->run_to_completion;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) struct ipmi_smi_msg *newmsg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498)
	/*
	 * Start the next message if available.
	 *
	 * Do this here, not in the actual receiver, because the lower
	 * layer is allowed to hold locks while calling message delivery,
	 * so doing it there could deadlock.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) if (!run_to_completion)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) if (intf->curr_msg == NULL && !intf->in_shutdown) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) struct list_head *entry = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) /* Pick the high priority queue first. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) if (!list_empty(&intf->hp_xmit_msgs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) entry = intf->hp_xmit_msgs.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) else if (!list_empty(&intf->xmit_msgs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) entry = intf->xmit_msgs.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) if (entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) list_del(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) newmsg = list_entry(entry, struct ipmi_smi_msg, link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) intf->curr_msg = newmsg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) if (!run_to_completion)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) if (newmsg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) intf->handlers->sender(intf->send_info, newmsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) handle_new_recv_msgs(intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) /* Handle a new message from the lower layer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) void ipmi_smi_msg_received(struct ipmi_smi *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) struct ipmi_smi_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) unsigned long flags = 0; /* keep us warning-free. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) int run_to_completion = intf->run_to_completion;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) * To preserve message order, we keep a queue and deliver from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) * a tasklet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) if (!run_to_completion)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) if (!run_to_completion)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) if (!run_to_completion)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) * We can get an asynchronous event or receive message in addition
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) * to commands we send.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) if (msg == intf->curr_msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) intf->curr_msg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) if (!run_to_completion)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) if (run_to_completion)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) smi_recv_tasklet(&intf->recv_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) tasklet_schedule(&intf->recv_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) EXPORT_SYMBOL(ipmi_smi_msg_received);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) if (intf->in_shutdown)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) tasklet_schedule(&intf->recv_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) static struct ipmi_smi_msg *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) unsigned char seq, long seqid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) if (!smi_msg)
		/*
		 * If we can't allocate the message, then just return;
		 * we get 4 retries, so this should be ok.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) smi_msg->data_size = recv_msg->msg.data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) pr_debug("Resend: %*ph\n", smi_msg->data_size, smi_msg->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) return smi_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603)
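/*
 * Called from ipmi_timeout_handler() with intf->seq_lock held; *flags
 * is the saved IRQ state for that lock, which is dropped and
 * reacquired around an actual retransmit.
 */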
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) struct list_head *timeouts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) unsigned long timeout_period,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) int slot, unsigned long *flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) bool *need_timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) struct ipmi_recv_msg *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) if (intf->in_shutdown)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) if (!ent->inuse)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) if (timeout_period < ent->timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) ent->timeout -= timeout_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) *need_timer = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) if (ent->retries_left == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) /* The message has used all its retries. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) ent->inuse = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) msg = ent->recv_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) list_add_tail(&msg->link, timeouts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) if (ent->broadcast)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) else if (is_lan_addr(&ent->recv_msg->addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) ipmi_inc_stat(intf, timed_out_lan_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) ipmi_inc_stat(intf, timed_out_ipmb_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) struct ipmi_smi_msg *smi_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) /* More retries, send again. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) *need_timer = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) * Start with the max timer, set to normal timer after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) * the message is sent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) ent->timeout = MAX_MSG_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) ent->retries_left--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) ent->seqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) if (!smi_msg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) if (is_lan_addr(&ent->recv_msg->addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) ipmi_inc_stat(intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) dropped_rexmit_lan_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) ipmi_inc_stat(intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) dropped_rexmit_ipmb_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) spin_unlock_irqrestore(&intf->seq_lock, *flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661)
		/*
		 * Send the new message.  We send with a zero priority;
		 * the message already timed out, so latency is not
		 * critical now, and high priority messages are really
		 * only for messages to the local MC, which don't get
		 * resent.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) if (intf->handlers) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) if (is_lan_addr(&ent->recv_msg->addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) ipmi_inc_stat(intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) retransmitted_lan_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) ipmi_inc_stat(intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) retransmitted_ipmb_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) smi_send(intf, intf->handlers, smi_msg, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) ipmi_free_smi_msg(smi_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) spin_lock_irqsave(&intf->seq_lock, *flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) static bool ipmi_timeout_handler(struct ipmi_smi *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) unsigned long timeout_period)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) struct list_head timeouts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) struct ipmi_recv_msg *msg, *msg2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) bool need_timer = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) if (!intf->bmc_registered) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) kref_get(&intf->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) if (!schedule_work(&intf->bmc_reg_work)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) kref_put(&intf->refcount, intf_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) need_timer = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) * Go through the seq table and find any messages that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) * have timed out, putting them in the timeouts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) * list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) INIT_LIST_HEAD(&timeouts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) spin_lock_irqsave(&intf->seq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) if (intf->ipmb_maintenance_mode_timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) intf->ipmb_maintenance_mode_timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) intf->ipmb_maintenance_mode_timeout -= timeout_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) check_msg_timeout(intf, &intf->seq_table[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) &timeouts, timeout_period, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) &flags, &need_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) spin_unlock_irqrestore(&intf->seq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) list_for_each_entry_safe(msg, msg2, &timeouts, link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) * Maintenance mode handling. Check the timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) * optimistically before we claim the lock. It may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) * mean a timeout gets missed occasionally, but that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) * only means the timeout gets extended by one period
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) * in that case. No big deal, and it avoids the lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) * most of the time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) */
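	/*
	 * Re-check under maintenance_mode_lock before acting (the
	 * classic check, lock, then check again pattern).
	 */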
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) if (intf->auto_maintenance_timeout > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) if (intf->auto_maintenance_timeout > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) intf->auto_maintenance_timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) -= timeout_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) if (!intf->maintenance_mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) && (intf->auto_maintenance_timeout <= 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) intf->maintenance_mode_enable = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) maintenance_mode_update(intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) spin_unlock_irqrestore(&intf->maintenance_mode_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) tasklet_schedule(&intf->recv_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) return need_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) static void ipmi_request_event(struct ipmi_smi *intf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) /* No event requests when in maintenance mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) if (intf->maintenance_mode_enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) if (!intf->in_shutdown)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) intf->handlers->request_events(intf->send_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761)
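/*
 * A single module-wide timer drives timeout processing for all
 * interfaces.  stop_operation is set when the message handler is being
 * shut down so that ipmi_timeout() stops rearming the timer.
 */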
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) static struct timer_list ipmi_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) static atomic_t stop_operation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) static void ipmi_timeout(struct timer_list *unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) struct ipmi_smi *intf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) bool need_timer = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) if (atomic_read(&stop_operation))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) index = srcu_read_lock(&ipmi_interfaces_srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) if (atomic_read(&intf->event_waiters)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) intf->ticks_to_req_ev--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) if (intf->ticks_to_req_ev == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) ipmi_request_event(intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) need_timer = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) srcu_read_unlock(&ipmi_interfaces_srcu, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) if (need_timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) static void need_waiter(struct ipmi_smi *intf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) /* Racy, but worst case we start the timer twice. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) if (!timer_pending(&ipmi_timer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804) static void free_smi_msg(struct ipmi_smi_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) atomic_dec(&smi_msg_inuse_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) /* Try to keep as much stuff out of the panic path as possible. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) if (!oops_in_progress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) kfree(msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) {
	struct ipmi_smi_msg *rv;

	rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) if (rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) rv->done = free_smi_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) rv->user_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) atomic_inc(&smi_msg_inuse_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) EXPORT_SYMBOL(ipmi_alloc_smi_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) static void free_recv_msg(struct ipmi_recv_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) atomic_dec(&recv_msg_inuse_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828) /* Try to keep as much stuff out of the panic path as possible. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) if (!oops_in_progress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830) kfree(msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) struct ipmi_recv_msg *rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) if (rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839) rv->user = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) rv->done = free_recv_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841) atomic_inc(&recv_msg_inuse_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848) if (msg->user && !oops_in_progress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849) kref_put(&msg->user->refcount, free_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) msg->done(msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) EXPORT_SYMBOL(ipmi_free_recv_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) static atomic_t panic_done_count = ATOMIC_INIT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858) atomic_dec(&panic_done_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) atomic_dec(&panic_done_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) * Inside a panic, send a message and wait for a response.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869) static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) struct ipmi_addr *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871) struct kernel_ipmi_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) struct ipmi_smi_msg smi_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874) struct ipmi_recv_msg recv_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877) smi_msg.done = dummy_smi_done_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878) recv_msg.done = dummy_recv_done_handler;
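	/* One count for the SMI message, one for the receive message. */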
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879) atomic_add(2, &panic_done_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880) rv = i_ipmi_request(NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881) intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882) addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884) msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885) intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886) &smi_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887) &recv_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) intf->addrinfo[0].address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890) intf->addrinfo[0].lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) 0, 1); /* Don't retry, and don't wait. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) atomic_sub(2, &panic_done_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894) else if (intf->handlers->flush_messages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895) intf->handlers->flush_messages(intf->send_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896)
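	/* Busy-poll the interface until panic_done_count drains back to zero. */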
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897) while (atomic_read(&panic_done_count) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) ipmi_poll(intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900)
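/*
 * Installed as intf->null_user_handler while the panic-time Get Event
 * Receiver request is outstanding; saves the receiver's address and LUN.
 */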
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901) static void event_receiver_fetcher(struct ipmi_smi *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902) struct ipmi_recv_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906) && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907) && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908) /* A Get Event Receiver response; save the receiver's address and LUN. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909) intf->event_receiver = msg->msg.data[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) intf->event_receiver_lun = msg->msg.data[2] & 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913)
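/*
 * Installed as intf->null_user_handler while the panic-time Get Device ID
 * request is outstanding; notes whether the local MC is an SEL device
 * and/or an event generator.
 */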
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914) static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917) && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918) && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919) && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921) * A Get Device ID response; save whether we are an SEL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922) * device and/or an event generator.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924) intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925) intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929) static void send_panic_events(struct ipmi_smi *intf, char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931) struct kernel_ipmi_msg msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932) unsigned char data[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933) struct ipmi_system_interface_addr *si;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934) struct ipmi_addr addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935) char *p = str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) struct ipmi_ipmb_addr *ipmb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939) if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942) si = (struct ipmi_system_interface_addr *) &addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943) si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944) si->channel = IPMI_BMC_CHANNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4945) si->lun = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947) /* Fill in an event saying that we have failed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948) msg.netfn = 0x04; /* Sensor or Event. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4949) msg.cmd = 2; /* Platform event command. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4950) msg.data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4951) msg.data_len = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4952) data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4953) data[1] = 0x03; /* This is for IPMI 1.0. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4954) data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955) data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4956) data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4958) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4959) * Put a few breadcrumbs in. Hopefully later we can add more things
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4960) * to make the panic events more useful.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4961) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4962) if (str) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4963) data[3] = str[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4964) data[6] = str[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4965) data[7] = str[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4968) /* Send the event announcing the panic. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4969) ipmi_panic_request_and_wait(intf, &addr, &msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4971) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4972) * On every interface, dump a bunch of OEM events holding the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4973) * panic string.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4974) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4975) if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4976) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4978) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4979) * intf_num is used as a marker to tell if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4980) * interface is valid, so we need a read barrier to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4981) * make sure data fetched before checking intf_num
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4982) * won't be used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4983) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4984) smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4986) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4987) * First job here is to figure out where to send the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4988) * OEM events. There's no way in IPMI to send OEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4989) * events using an event send command, so we have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4990) * find the SEL to put them in and stick them in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4991) * there.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4992) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4994) /* Get capabilities from the get device id. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4995) intf->local_sel_device = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4996) intf->local_event_generator = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4997) intf->event_receiver = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4999) /* Request the device info from the local MC. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5000) msg.netfn = IPMI_NETFN_APP_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5001) msg.cmd = IPMI_GET_DEVICE_ID_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5002) msg.data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5003) msg.data_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5004) intf->null_user_handler = device_id_fetcher;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5005) ipmi_panic_request_and_wait(intf, &addr, &msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5007) if (intf->local_event_generator) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5008) /* Request the event receiver from the local MC. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5009) msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5010) msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5011) msg.data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5012) msg.data_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5013) intf->null_user_handler = event_receiver_fetcher;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5014) ipmi_panic_request_and_wait(intf, &addr, &msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5016) intf->null_user_handler = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5018) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5019) * Validate the event receiver. The low bit must not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5020) * be 1 (it must be a valid IPMB address), it cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5021) * be zero, and it must not be my address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5022) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5023) if (((intf->event_receiver & 1) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5024) && (intf->event_receiver != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5025) && (intf->event_receiver != intf->addrinfo[0].address)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5026) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5027) * The event receiver is valid, send an IPMB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5028) * message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5029) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5030) ipmb = (struct ipmi_ipmb_addr *) &addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5031) ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5032) ipmb->channel = 0; /* FIXME - is this right? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5033) ipmb->lun = intf->event_receiver_lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5034) ipmb->slave_addr = intf->event_receiver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5035) } else if (intf->local_sel_device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5036) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5037) * The event receiver was not valid (or was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5038) * me), but I am an SEL device, so just dump
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5039) * the events in my own SEL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5040) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5041) si = (struct ipmi_system_interface_addr *) &addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5042) si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5043) si->channel = IPMI_BMC_CHANNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5044) si->lun = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5045) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5046) return; /* Nowhere to send the event. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5048) msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5049) msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5050) msg.data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5051) msg.data_len = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5052)
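	/*
	 * Build one OEM SEL record (type 0xf0, no timestamp) per chunk:
	 * record ID left for the BMC to assign, our slave address, a
	 * sequence number, then up to 11 bytes of the panic string.
	 */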
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5053) j = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5054) while (*p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5055) int size = strlen(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5057) if (size > 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5058) size = 11;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5059) data[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5060) data[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5061) data[2] = 0xf0; /* OEM event without timestamp. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5062) data[3] = intf->addrinfo[0].address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5063) data[4] = j++; /* sequence # */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5064) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5065) * Always give 11 bytes, so strncpy will fill
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5066) * it with zeroes for me.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5067) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5068) strncpy(data+5, p, 11);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5069) p += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5071) ipmi_panic_request_and_wait(intf, &addr, &msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5075) static int has_panicked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5077) static int panic_event(struct notifier_block *this,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5078) unsigned long event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5079) void *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5080) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5081) struct ipmi_smi *intf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5082) struct ipmi_user *user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5084) if (has_panicked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5085) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5086) has_panicked = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5088) /* For every registered interface, set it to run to completion. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5089) list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5090) if (!intf->handlers || intf->intf_num == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5091) /* Interface is not ready. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5092) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5094) if (!intf->handlers->poll)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5095) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5097) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5098) * If the panic interrupted code that was holding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5099) * xmit_msgs_lock or waiting_rcv_msgs_lock, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5100) * corresponding list may be corrupted. In that case,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5101) * drop the items on the list for safety.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5102) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5103) if (!spin_trylock(&intf->xmit_msgs_lock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5104) INIT_LIST_HEAD(&intf->xmit_msgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5105) INIT_LIST_HEAD(&intf->hp_xmit_msgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5106) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5107) spin_unlock(&intf->xmit_msgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5109) if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5110) INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5111) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5112) spin_unlock(&intf->waiting_rcv_msgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5113)
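		/*
		 * Put the interface into run-to-completion mode so it can be
		 * driven purely by polling from panic context.
		 */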
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5114) intf->run_to_completion = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5115) if (intf->handlers->set_run_to_completion)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5116) intf->handlers->set_run_to_completion(intf->send_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5117) 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5118)
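		/* Notify every user that has registered a panic handler. */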
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5119) list_for_each_entry_rcu(user, &intf->users, link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5120) if (user->handler->ipmi_panic_handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5121) user->handler->ipmi_panic_handler(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5122) user->handler_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5125) send_panic_events(intf, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5128) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5131) /* Must be called with ipmi_interfaces_mutex held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5132) static int ipmi_register_driver(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5134) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5136) if (drvregistered)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5137) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5139) rv = driver_register(&ipmidriver.driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5140) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5141) pr_err("Could not register IPMI driver\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5142) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5143) drvregistered = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5144) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5147) static struct notifier_block panic_block = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5148) .notifier_call = panic_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5149) .next = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5150) .priority = 200 /* priority: INT_MAX >= x >= 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5151) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5153) static int ipmi_init_msghandler(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5155) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5157) mutex_lock(&ipmi_interfaces_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5158) rv = ipmi_register_driver();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5159) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5160) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5161) if (initialized)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5162) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5164) rv = init_srcu_struct(&ipmi_interfaces_srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5165) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5166) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5168) remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5169) if (!remove_work_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5170) pr_err("unable to create ipmi-msghandler-remove-wq workqueue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5171) rv = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5172) goto out_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5174)
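	/* Arm the maintenance timer that drives ipmi_timeout() processing. */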
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5175) timer_setup(&ipmi_timer, ipmi_timeout, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5176) mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5178) atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5180) initialized = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5182) out_wq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5183) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5184) cleanup_srcu_struct(&ipmi_interfaces_srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5185) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5186) mutex_unlock(&ipmi_interfaces_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5187) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5190) static int __init ipmi_init_msghandler_mod(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5192) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5194) pr_info("version " IPMI_DRIVER_VERSION "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5196) mutex_lock(&ipmi_interfaces_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5197) rv = ipmi_register_driver();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5198) mutex_unlock(&ipmi_interfaces_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5200) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5203) static void __exit cleanup_ipmi(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5205) int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5207) if (initialized) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5208) destroy_workqueue(remove_work_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5210) atomic_notifier_chain_unregister(&panic_notifier_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5211) &panic_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5213) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5214) * This can't be called if any interfaces exist, so no worry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5215) * about shutting down the interfaces.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5216) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5218) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5219) * Tell the timer to stop, then wait for it to stop. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5220) * avoids problems with race conditions removing the timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5221) * here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5222) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5223) atomic_set(&stop_operation, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5224) del_timer_sync(&ipmi_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5226) initialized = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5228) /* Check for buffer leaks. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5229) count = atomic_read(&smi_msg_inuse_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5230) if (count != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5231) pr_warn("SMI message count %d at exit\n", count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5232) count = atomic_read(&recv_msg_inuse_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5233) if (count != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5234) pr_warn("recv message count %d at exit\n", count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5236) cleanup_srcu_struct(&ipmi_interfaces_srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5238) if (drvregistered)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5239) driver_unregister(&ipmidriver.driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5241) module_exit(cleanup_ipmi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5243) module_init(ipmi_init_msghandler_mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5244) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5245) MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5246) MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5248) MODULE_VERSION(IPMI_DRIVER_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5249) MODULE_SOFTDEP("post: ipmi_devintf");