// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 - 2015 UNISYS CORPORATION
 * All rights reserved.
 */

#include <linux/acpi.h>
#include <linux/crash_dump.h>
#include <linux/visorbus.h>

#include "visorbus_private.h"

/* {72120008-4AAB-11DC-8530-444553544200} */
#define VISOR_SIOVM_GUID GUID_INIT(0x72120008, 0x4AAB, 0x11DC, 0x85, 0x30, \
				   0x44, 0x45, 0x53, 0x54, 0x42, 0x00)

static const guid_t visor_vhba_channel_guid = VISOR_VHBA_CHANNEL_GUID;
static const guid_t visor_siovm_guid = VISOR_SIOVM_GUID;
static const guid_t visor_controlvm_channel_guid = VISOR_CONTROLVM_CHANNEL_GUID;

#define POLLJIFFIES_CONTROLVM_FAST 1
#define POLLJIFFIES_CONTROLVM_SLOW 100

#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)

#define UNISYS_VISOR_LEAF_ID 0x40000000

/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
#define UNISYS_VISOR_ID_EBX 0x73696e55
#define UNISYS_VISOR_ID_ECX 0x70537379
#define UNISYS_VISOR_ID_EDX 0x34367261

/*
 * When the controlvm channel is idle for at least MIN_IDLE_SECONDS, we switch
 * to slow polling mode. As soon as we get a controlvm message, we switch back
 * to fast polling mode.
 */
#define MIN_IDLE_SECONDS 10

struct parser_context {
	unsigned long allocbytes;
	unsigned long param_bytes;
	u8 *curr;
	unsigned long bytes_remaining;
	bool byte_stream;
	struct visor_controlvm_parameters_header data;
};

/* VMCALL_CONTROLVM_ADDR: Used by all guests, not just IO. */
#define VMCALL_CONTROLVM_ADDR 0x0501

enum vmcall_result {
	VMCALL_RESULT_SUCCESS = 0,
	VMCALL_RESULT_INVALID_PARAM = 1,
	VMCALL_RESULT_DATA_UNAVAILABLE = 2,
	VMCALL_RESULT_FAILURE_UNAVAILABLE = 3,
	VMCALL_RESULT_DEVICE_ERROR = 4,
	VMCALL_RESULT_DEVICE_NOT_READY = 5
};

/*
 * struct vmcall_io_controlvm_addr_params - Parameters for the
 *					    VMCALL_CONTROLVM_ADDR interface,
 *					    used by IO VMCALLs.
 * @address:	   The guest-relative physical address of the ControlVm
 *		   channel. This VMCall fills it in with the appropriate
 *		   address. Contents provided by this VMCALL (OUT).
 * @channel_bytes: The size of the ControlVm channel in bytes. This VMCall
 *		   fills it in with the appropriate size. Contents provided
 *		   by this VMCALL (OUT).
 * @unused:	   Unused bytes in the 64-bit aligned struct.
 */
struct vmcall_io_controlvm_addr_params {
	u64 address;
	u32 channel_bytes;
	u8 unused[4];
} __packed;

struct visorchipset_device {
	struct acpi_device *acpi_device;
	unsigned long poll_jiffies;
	/* when we got our last controlvm message */
	unsigned long most_recent_message_jiffies;
	struct delayed_work periodic_controlvm_work;
	struct visorchannel *controlvm_channel;
	unsigned long controlvm_payload_bytes_buffered;
	/*
	 * The following variables are used to handle the scenario where we are
	 * unable to offload the payload from a controlvm message due to memory
	 * requirements. In this scenario, we simply stash the controlvm
	 * message, then attempt to process it again the next time
	 * controlvm_periodic_work() runs.
	 */
	struct controlvm_message controlvm_pending_msg;
	bool controlvm_pending_msg_valid;
	struct vmcall_io_controlvm_addr_params controlvm_params;
};

static struct visorchipset_device *chipset_dev;

struct parahotplug_request {
	struct list_head list;
	int id;
	unsigned long expiration;
	struct controlvm_message msg;
};

/* sysfs attributes that expose fields of the controlvm channel */
static ssize_t toolaction_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 tool_action = 0;
	int err;

	err = visorchannel_read(chipset_dev->controlvm_channel,
				offsetof(struct visor_controlvm_channel,
					 tool_action),
				&tool_action, sizeof(u8));
	if (err)
		return err;
	return sprintf(buf, "%u\n", tool_action);
}

static ssize_t toolaction_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	u8 tool_action;
	int err;

	if (kstrtou8(buf, 10, &tool_action))
		return -EINVAL;
	err = visorchannel_write(chipset_dev->controlvm_channel,
				 offsetof(struct visor_controlvm_channel,
					  tool_action),
				 &tool_action, sizeof(u8));
	if (err)
		return err;
	return count;
}
static DEVICE_ATTR_RW(toolaction);

static ssize_t boottotool_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct efi_visor_indication efi_visor_indication;
	int err;

	err = visorchannel_read(chipset_dev->controlvm_channel,
				offsetof(struct visor_controlvm_channel,
					 efi_visor_ind),
				&efi_visor_indication,
				sizeof(struct efi_visor_indication));
	if (err)
		return err;
	return sprintf(buf, "%u\n", efi_visor_indication.boot_to_tool);
}

static ssize_t boottotool_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	int val, err;
	/* zero-init so unset indication bits are not written as stack garbage */
	struct efi_visor_indication efi_visor_indication = {};

	if (kstrtoint(buf, 10, &val))
		return -EINVAL;
	efi_visor_indication.boot_to_tool = val;
	err = visorchannel_write(chipset_dev->controlvm_channel,
				 offsetof(struct visor_controlvm_channel,
					  efi_visor_ind),
				 &efi_visor_indication,
				 sizeof(struct efi_visor_indication));
	if (err)
		return err;
	return count;
}
static DEVICE_ATTR_RW(boottotool);

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	u32 error = 0;
	int err;

	err = visorchannel_read(chipset_dev->controlvm_channel,
				offsetof(struct visor_controlvm_channel,
					 installation_error),
				&error, sizeof(u32));
	if (err)
		return err;
	return sprintf(buf, "%u\n", error);
}

static ssize_t error_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u32 error;
	int err;

	if (kstrtou32(buf, 10, &error))
		return -EINVAL;
	err = visorchannel_write(chipset_dev->controlvm_channel,
				 offsetof(struct visor_controlvm_channel,
					  installation_error),
				 &error, sizeof(u32));
	if (err)
		return err;
	return count;
}
static DEVICE_ATTR_RW(error);

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	u32 text_id = 0;
	int err;

	err = visorchannel_read(chipset_dev->controlvm_channel,
				offsetof(struct visor_controlvm_channel,
					 installation_text_id),
				&text_id, sizeof(u32));
	if (err)
		return err;
	return sprintf(buf, "%u\n", text_id);
}

static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	u32 text_id;
	int err;

	if (kstrtou32(buf, 10, &text_id))
		return -EINVAL;
	err = visorchannel_write(chipset_dev->controlvm_channel,
				 offsetof(struct visor_controlvm_channel,
					  installation_text_id),
				 &text_id, sizeof(u32));
	if (err)
		return err;
	return count;
}
static DEVICE_ATTR_RW(textid);

static ssize_t remaining_steps_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	u16 remaining_steps = 0;
	int err;

	err = visorchannel_read(chipset_dev->controlvm_channel,
				offsetof(struct visor_controlvm_channel,
					 installation_remaining_steps),
				&remaining_steps, sizeof(u16));
	if (err)
		return err;
	return sprintf(buf, "%hu\n", remaining_steps);
}

static ssize_t remaining_steps_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	u16 remaining_steps;
	int err;

	if (kstrtou16(buf, 10, &remaining_steps))
		return -EINVAL;
	err = visorchannel_write(chipset_dev->controlvm_channel,
				 offsetof(struct visor_controlvm_channel,
					  installation_remaining_steps),
				 &remaining_steps, sizeof(u16));
	if (err)
		return err;
	return count;
}
static DEVICE_ATTR_RW(remaining_steps);

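/*
 * controlvm_init_response() - build a response by copying the request header
 *                             and clearing its payload fields; a negative
 *                             @response marks the message as failed and
 *                             records the status code.
 */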
static void controlvm_init_response(struct controlvm_message *msg,
				    struct controlvm_message_header *msg_hdr,
				    int response)
{
	memset(msg, 0, sizeof(struct controlvm_message));
	memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
	msg->hdr.payload_bytes = 0;
	msg->hdr.payload_vm_offset = 0;
	msg->hdr.payload_max_bytes = 0;
	if (response < 0) {
		msg->hdr.flags.failed = 1;
		msg->hdr.completion_status = (u32)(-response);
	}
}

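/*
 * controlvm_respond_chipset_init() - answer a CHIPSET_INIT request, reporting
 *                                    the negotiated feature bits back on the
 *                                    request queue.
 */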
static int controlvm_respond_chipset_init(
				struct controlvm_message_header *msg_hdr,
				int response,
				enum visor_chipset_feature features)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	outmsg.cmd.init_chipset.features = features;
	return visorchannel_signalinsert(chipset_dev->controlvm_channel,
					 CONTROLVM_QUEUE_REQUEST, &outmsg);
}

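/*
 * chipset_init() - handle the CHIPSET_INIT message; may only succeed once,
 *                  and negotiates the parahotplug and reply feature bits
 *                  with the Command partition.
 */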
static int chipset_init(struct controlvm_message *inmsg)
{
	static int chipset_inited;
	enum visor_chipset_feature features = 0;
	int rc = CONTROLVM_RESP_SUCCESS;
	int res = 0;

	if (chipset_inited) {
		rc = -CONTROLVM_RESP_ALREADY_DONE;
		res = -EIO;
		goto out_respond;
	}
	chipset_inited = 1;
	/*
	 * Set features to indicate we support parahotplug (if Command also
	 * supports it). Set the "reply" bit so Command knows this is a
	 * features-aware driver.
	 */
	features = inmsg->cmd.init_chipset.features &
		   VISOR_CHIPSET_FEATURE_PARA_HOTPLUG;
	features |= VISOR_CHIPSET_FEATURE_REPLY;

out_respond:
	if (inmsg->hdr.flags.response_expected)
		res = controlvm_respond_chipset_init(&inmsg->hdr, rc, features);

	return res;
}

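/*
 * controlvm_respond() - send a response for @msg_hdr on the request queue;
 *                       when @state is supplied, the device_change_state
 *                       payload is filled in as well. Test messages are not
 *                       answered.
 */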
static int controlvm_respond(struct controlvm_message_header *msg_hdr,
			     int response, struct visor_segment_state *state)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	if (outmsg.hdr.flags.test_message == 1)
		return -EINVAL;
	if (state) {
		outmsg.cmd.device_change_state.state = *state;
		outmsg.cmd.device_change_state.flags.phys_device = 1;
	}
	return visorchannel_signalinsert(chipset_dev->controlvm_channel,
					 CONTROLVM_QUEUE_REQUEST, &outmsg);
}

enum crash_obj_type {
	CRASH_DEV,
	CRASH_BUS,
};

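/*
 * save_crash_message() - stash a bus or device CREATE message in the
 *                        controlvm channel's saved-crash-message area (the
 *                        bus message first, the device message right after
 *                        it), after validating the area's count and offset.
 */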
static int save_crash_message(struct controlvm_message *msg,
			      enum crash_obj_type cr_type)
{
	u32 local_crash_msg_offset;
	u16 local_crash_msg_count;
	int err;

	err = visorchannel_read(chipset_dev->controlvm_channel,
				offsetof(struct visor_controlvm_channel,
					 saved_crash_message_count),
				&local_crash_msg_count, sizeof(u16));
	if (err) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to read message count\n");
		return err;
	}
	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		dev_err(&chipset_dev->acpi_device->dev,
			"invalid number of messages\n");
		return -EIO;
	}
	err = visorchannel_read(chipset_dev->controlvm_channel,
				offsetof(struct visor_controlvm_channel,
					 saved_crash_message_offset),
				&local_crash_msg_offset, sizeof(u32));
	if (err) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to read offset\n");
		return err;
	}
	switch (cr_type) {
	case CRASH_DEV:
		local_crash_msg_offset += sizeof(struct controlvm_message);
		err = visorchannel_write(chipset_dev->controlvm_channel,
					 local_crash_msg_offset, msg,
					 sizeof(struct controlvm_message));
		if (err) {
			dev_err(&chipset_dev->acpi_device->dev,
				"failed to write dev msg\n");
			return err;
		}
		break;
	case CRASH_BUS:
		err = visorchannel_write(chipset_dev->controlvm_channel,
					 local_crash_msg_offset, msg,
					 sizeof(struct controlvm_message));
		if (err) {
			dev_err(&chipset_dev->acpi_device->dev,
				"failed to write bus msg\n");
			return err;
		}
		break;
	default:
		dev_err(&chipset_dev->acpi_device->dev,
			"Invalid crash_obj_type\n");
		break;
	}
	return 0;
}

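/*
 * controlvm_responder() - respond to a pending message header, but only if it
 *                         matches the command id being answered.
 */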
static int controlvm_responder(enum controlvm_id cmd_id,
			       struct controlvm_message_header *pending_msg_hdr,
			       int response)
{
	if (pending_msg_hdr->id != (u32)cmd_id)
		return -EINVAL;

	return controlvm_respond(pending_msg_hdr, response, NULL);
}

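/*
 * device_changestate_responder() - answer a pending DEVICE_CHANGESTATE
 *                                  request for @p, echoing back its
 *                                  bus/device numbers and the resulting
 *                                  segment state.
 */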
static int device_changestate_responder(enum controlvm_id cmd_id,
					struct visor_device *p, int response,
					struct visor_segment_state state)
{
	struct controlvm_message outmsg;

	if (p->pending_msg_hdr->id != cmd_id)
		return -EINVAL;

	controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
	outmsg.cmd.device_change_state.bus_no = p->chipset_bus_no;
	outmsg.cmd.device_change_state.dev_no = p->chipset_dev_no;
	outmsg.cmd.device_change_state.state = state;
	return visorchannel_signalinsert(chipset_dev->controlvm_channel,
					 CONTROLVM_QUEUE_REQUEST, &outmsg);
}

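/*
 * visorbus_create() - handle a BUS_CREATE message: allocate the bus tracking
 *                     structure, map its channel and register a new bus
 *                     instance. On failure the request is answered here; on
 *                     success visorbus_create_instance() sends the response.
 */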
static int visorbus_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr;
	u32 bus_no = cmd->create_bus.bus_no;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;
	int err;

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (bus_info && bus_info->state.created == 1) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed %s: already exists\n", __func__);
		err = -EEXIST;
		goto err_respond;
	}
	bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
	if (!bus_info) {
		err = -ENOMEM;
		goto err_respond;
	}
	INIT_LIST_HEAD(&bus_info->list_all);
	bus_info->chipset_bus_no = bus_no;
	bus_info->chipset_dev_no = BUS_ROOT_DEVICE;
	if (guid_equal(&cmd->create_bus.bus_inst_guid, &visor_siovm_guid)) {
		err = save_crash_message(inmsg, CRASH_BUS);
		if (err)
			goto err_free_bus_info;
	}
	if (inmsg->hdr.flags.response_expected == 1) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			err = -ENOMEM;
			goto err_free_bus_info;
		}
		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		bus_info->pending_msg_hdr = pmsg_hdr;
	}
	visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
					   GFP_KERNEL,
					   &cmd->create_bus.bus_data_type_guid,
					   false);
	if (!visorchannel) {
		err = -ENOMEM;
		goto err_free_pending_msg;
	}
	bus_info->visorchannel = visorchannel;
	/* Response will be handled by visorbus_create_instance on success */
	err = visorbus_create_instance(bus_info);
	if (err)
		goto err_destroy_channel;
	return 0;

err_destroy_channel:
	visorchannel_destroy(visorchannel);

err_free_pending_msg:
	kfree(bus_info->pending_msg_hdr);

err_free_bus_info:
	kfree(bus_info);

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}

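/*
 * visorbus_destroy() - handle a BUS_DESTROY message by tearing down the bus
 *                      instance; when a response is expected it is deferred
 *                      to visorbus_remove_instance().
 */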
static int visorbus_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_header *pmsg_hdr;
	u32 bus_no = inmsg->cmd.destroy_bus.bus_no;
	struct visor_device *bus_info;
	int err;

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		err = -ENODEV;
		goto err_respond;
	}
	if (bus_info->state.created == 0) {
		err = -ENOENT;
		goto err_respond;
	}
	if (bus_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		err = -EEXIST;
		goto err_respond;
	}
	if (inmsg->hdr.flags.response_expected == 1) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			err = -ENOMEM;
			goto err_respond;
		}
		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		bus_info->pending_msg_hdr = pmsg_hdr;
	}
	/* Response will be handled by visorbus_remove_instance */
	visorbus_remove_instance(bus_info);
	return 0;

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}

static const guid_t *parser_id_get(struct parser_context *ctx)
{
	return &ctx->data.id;
}

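/*
 * parser_string_get() - return a NUL-terminated copy of at most @nscan bytes
 *                       starting at @pscan; the caller must kfree() the
 *                       result.
 */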
static void *parser_string_get(u8 *pscan, int nscan)
{
	int value_length;
	void *value;

	if (nscan == 0)
		return NULL;

	value_length = strnlen(pscan, nscan);
	value = kzalloc(value_length + 1, GFP_KERNEL);
	if (!value)
		return NULL;
	if (value_length > 0)
		memcpy(value, pscan, value_length);
	return value;
}

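/*
 * parser_name_get() - extract the name string from the controlvm parameter
 *                     payload, after validating that the name offset and
 *                     length fit inside the payload.
 */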
static void *parser_name_get(struct parser_context *ctx)
{
	struct visor_controlvm_parameters_header *phdr;

	phdr = &ctx->data;
	if ((unsigned long)phdr->name_offset +
	    (unsigned long)phdr->name_length > ctx->param_bytes)
		return NULL;
	/* name_offset is relative to the start of the parameters header */
	ctx->curr = (u8 *)phdr + phdr->name_offset;
	ctx->bytes_remaining = phdr->name_length;
	return parser_string_get(ctx->curr, phdr->name_length);
}

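/*
 * visorbus_configure() - handle a BUS_CONFIGURE message: point the bus
 *                        channel at the guest partition and, if a parameter
 *                        payload was supplied, record the partition GUID and
 *                        name.
 */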
static int visorbus_configure(struct controlvm_message *inmsg,
			      struct parser_context *parser_ctx)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no;
	struct visor_device *bus_info;
	int err = 0;

	bus_no = cmd->configure_bus.bus_no;
	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		err = -EINVAL;
		goto err_respond;
	}
	if (bus_info->state.created == 0) {
		err = -EINVAL;
		goto err_respond;
	}
	if (bus_info->pending_msg_hdr) {
		err = -EIO;
		goto err_respond;
	}
	err = visorchannel_set_clientpartition(bus_info->visorchannel,
					       cmd->configure_bus.guest_handle);
	if (err)
		goto err_respond;
	if (parser_ctx) {
		const guid_t *partition_guid = parser_id_get(parser_ctx);

		guid_copy(&bus_info->partition_guid, partition_guid);
		bus_info->name = parser_name_get(parser_ctx);
	}
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return 0;

err_respond:
	dev_err(&chipset_dev->acpi_device->dev,
		"%s exited with err: %d\n", __func__, err);
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}

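/*
 * visorbus_device_create() - handle a DEVICE_CREATE message: allocate the
 *                            device tracking structure, map its channel and
 *                            register the device on its bus. For vHBA
 *                            devices the message is also saved to the crash
 *                            message area. create_visor_device() sends the
 *                            response on success.
 */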
static int visorbus_device_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr;
	u32 bus_no = cmd->create_device.bus_no;
	u32 dev_no = cmd->create_device.dev_no;
	struct visor_device *dev_info;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;
	int err;

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to get bus by id: %d\n", bus_no);
		err = -ENODEV;
		goto err_respond;
	}
	if (bus_info->state.created == 0) {
		dev_err(&chipset_dev->acpi_device->dev,
			"bus not created, id: %d\n", bus_no);
		err = -EINVAL;
		goto err_respond;
	}
	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (dev_info && dev_info->state.created == 1) {
		dev_err(&chipset_dev->acpi_device->dev,
			"device already exists: %d/%d\n", bus_no, dev_no);
		err = -EEXIST;
		goto err_respond;
	}

	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
	if (!dev_info) {
		err = -ENOMEM;
		goto err_respond;
	}
	dev_info->chipset_bus_no = bus_no;
	dev_info->chipset_dev_no = dev_no;
	guid_copy(&dev_info->inst, &cmd->create_device.dev_inst_guid);
	dev_info->device.parent = &bus_info->device;
	visorchannel = visorchannel_create(cmd->create_device.channel_addr,
					   GFP_KERNEL,
					   &cmd->create_device.data_type_guid,
					   true);
	if (!visorchannel) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to create visorchannel: %d/%d\n",
			bus_no, dev_no);
		err = -ENOMEM;
		goto err_free_dev_info;
	}
	dev_info->visorchannel = visorchannel;
	guid_copy(&dev_info->channel_type_guid,
		  &cmd->create_device.data_type_guid);
	if (guid_equal(&cmd->create_device.data_type_guid,
		       &visor_vhba_channel_guid)) {
		err = save_crash_message(inmsg, CRASH_DEV);
		if (err)
			goto err_destroy_visorchannel;
	}
	if (inmsg->hdr.flags.response_expected == 1) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			err = -ENOMEM;
			goto err_destroy_visorchannel;
		}
		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		dev_info->pending_msg_hdr = pmsg_hdr;
	}
	/* create_visor_device will send response */
	err = create_visor_device(dev_info);
	if (err)
		goto err_destroy_visorchannel;

	return 0;

err_destroy_visorchannel:
	visorchannel_destroy(visorchannel);

err_free_dev_info:
	kfree(dev_info);

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}

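/*
 * visorbus_device_changestate() - handle a DEVICE_CHANGESTATE message by
 *                                 resuming or pausing the device to match
 *                                 the requested segment state; the response
 *                                 is sent from the resume/pause paths.
 */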
static int visorbus_device_changestate(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr;
	u32 bus_no = cmd->device_change_state.bus_no;
	u32 dev_no = cmd->device_change_state.dev_no;
	struct visor_segment_state state = cmd->device_change_state.state;
	struct visor_device *dev_info;
	int err = 0;

	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (!dev_info) {
		err = -ENODEV;
		goto err_respond;
	}
	if (dev_info->state.created == 0) {
		err = -EINVAL;
		goto err_respond;
	}
	if (dev_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		err = -EIO;
		goto err_respond;
	}

	if (inmsg->hdr.flags.response_expected == 1) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			err = -ENOMEM;
			goto err_respond;
		}
		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		dev_info->pending_msg_hdr = pmsg_hdr;
	}
	if (state.alive == segment_state_running.alive &&
	    state.operating == segment_state_running.operating)
		/* Response will be sent from visorchipset_device_resume */
		err = visorchipset_device_resume(dev_info);
	/* ServerNotReady / ServerLost / SegmentStateStandby */
	else if (state.alive == segment_state_standby.alive &&
		 state.operating == segment_state_standby.operating)
		/*
		 * Technically this is the standby case, where the server is
		 * lost. Response will be sent from visorchipset_device_pause.
		 */
		err = visorchipset_device_pause(dev_info);
	if (err)
		goto err_respond;
	return 0;

err_respond:
	dev_err(&chipset_dev->acpi_device->dev, "failed: %d\n", err);
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}

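/*
 * visorbus_device_destroy() - handle a DEVICE_DESTROY message: stash the
 *                             response header (if one is expected) and
 *                             remove the device.
 */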
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) static int visorbus_device_destroy(struct controlvm_message *inmsg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) struct controlvm_message_packet *cmd = &inmsg->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) struct controlvm_message_header *pmsg_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) u32 bus_no = cmd->destroy_device.bus_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) u32 dev_no = cmd->destroy_device.dev_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) struct visor_device *dev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) if (!dev_info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) goto err_respond;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) if (dev_info->state.created == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) goto err_respond;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) if (dev_info->pending_msg_hdr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) /* only non-NULL if dev is still waiting on a response */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) goto err_respond;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) if (inmsg->hdr.flags.response_expected == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) if (!pmsg_hdr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) goto err_respond;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) memcpy(pmsg_hdr, &inmsg->hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) sizeof(struct controlvm_message_header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) dev_info->pending_msg_hdr = pmsg_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) kfree(dev_info->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) remove_visor_device(dev_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) err_respond:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) if (inmsg->hdr.flags.response_expected == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * The general parahotplug flow works as follows. The visorchipset receives
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) * a DEVICE_CHANGESTATE message from Command specifying a physical device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) * to enable or disable. The CONTROLVM message handler calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) * parahotplug_process_message, which then adds the message to a global list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) * and kicks off a udev event which causes a user level script to enable or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) * disable the specified device. The udev script then writes to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) * /sys/devices/platform/visorchipset/parahotplug, which causes the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) * parahotplug store functions to get called, at which point the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * appropriate CONTROLVM message is retrieved from the list and responded to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) */
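/*
 * Illustrative example (hypothetical id, not taken from the driver): for a
 * disable request announced to the script as VISOR_PARAHOTPLUG_ID=7, the
 * script would acknowledge completion with something like
 *
 *   echo 7 > /sys/devices/platform/visorchipset/parahotplug/devicedisabled
 *
 * which lands in devicedisabled_store() below and completes the matching
 * CONTROLVM message via parahotplug_request_complete().
 */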
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) #define PARAHOTPLUG_TIMEOUT_MS 2000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) * parahotplug_next_id() - generate unique int to match an outstanding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) * CONTROLVM message with a udev script /sys
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) * response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * Return: a unique integer value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) static int parahotplug_next_id(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) static atomic_t id = ATOMIC_INIT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) return atomic_inc_return(&id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * parahotplug_next_expiration() - returns the time (in jiffies) when a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) * CONTROLVM message on the list should expire
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * -- PARAHOTPLUG_TIMEOUT_MS in the future
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * Return: expected expiration time (in jiffies)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) static unsigned long parahotplug_next_expiration(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) * parahotplug_request_create() - create a parahotplug_request, which is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) * basically a wrapper for a CONTROLVM_MESSAGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * that we can stick on a list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * @msg: the message to insert in the request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * Return: the request containing the provided message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) static struct parahotplug_request *parahotplug_request_create(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) struct controlvm_message *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) struct parahotplug_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) req = kmalloc(sizeof(*req), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (!req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) req->id = parahotplug_next_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) req->expiration = parahotplug_next_expiration();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) req->msg = *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) return req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) * parahotplug_request_destroy() - free a parahotplug_request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * @req: the request to deallocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) static void parahotplug_request_destroy(struct parahotplug_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) kfree(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) static LIST_HEAD(parahotplug_request_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) /* lock protecting parahotplug_request_list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) static DEFINE_SPINLOCK(parahotplug_request_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) * parahotplug_request_complete() - mark request as complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) * @id: the id of the request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) * @active: indicates whether the request is assigned to the active partition
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * Called from the /sys handler, which means the user script has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) * finished the enable/disable. Find the matching identifier, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) * respond to the CONTROLVM message with success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * Return: 0 on success or -EINVAL on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) static int parahotplug_request_complete(int id, u16 active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) struct list_head *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) struct list_head *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) struct parahotplug_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) spin_lock(&parahotplug_request_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) /* Look for a request matching "id". */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) list_for_each_safe(pos, tmp, &parahotplug_request_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) req = list_entry(pos, struct parahotplug_request, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) if (req->id == id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) * Found a match. Remove it from the list and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) * respond.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) list_del(pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) spin_unlock(&parahotplug_request_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) req->msg.cmd.device_change_state.state.active = active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (req->msg.hdr.flags.response_expected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) controlvm_respond(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) &req->msg.cmd.device_change_state.state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) parahotplug_request_destroy(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) spin_unlock(&parahotplug_request_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * devicedisabled_store() - disables the hotplug device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) * @dev: sysfs interface variable not utilized in this function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) * @attr: sysfs interface variable not utilized in this function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) * @buf: buffer containing the device id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) * @count: the size of the buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) * The parahotplug/devicedisabled interface gets called by our support script
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) * when an SR-IOV device has been shut down. The ID is passed to the script
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) * and then passed back when the device has been removed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) * Return: the size of the buffer for success or negative for error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) static ssize_t devicedisabled_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) unsigned int id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) if (kstrtouint(buf, 10, &id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) err = parahotplug_request_complete(id, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) static DEVICE_ATTR_WO(devicedisabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) * deviceenabled_store() - enables the hotplug device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) * @dev: sysfs interface variable not utilized in this function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * @attr: sysfs interface variable not utilized in this function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) * @buf: buffer containing the device id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * @count: the size of the buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * The parahotplug/deviceenabled interface gets called by our support script
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) * when an SR-IOV device has been recovered. The ID is passed to the script
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) * and then passed back when the device has been brought back up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * Return: the size of the buffer for success or negative for error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) static ssize_t deviceenabled_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) unsigned int id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if (kstrtouint(buf, 10, &id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) parahotplug_request_complete(id, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) static DEVICE_ATTR_WO(deviceenabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) static struct attribute *visorchipset_install_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) &dev_attr_toolaction.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) &dev_attr_boottotool.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) &dev_attr_error.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) &dev_attr_textid.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) &dev_attr_remaining_steps.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) static const struct attribute_group visorchipset_install_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) .name = "install",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) .attrs = visorchipset_install_attrs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) static struct attribute *visorchipset_parahotplug_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) &dev_attr_devicedisabled.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) &dev_attr_deviceenabled.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) static const struct attribute_group visorchipset_parahotplug_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) .name = "parahotplug",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) .attrs = visorchipset_parahotplug_attrs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) static const struct attribute_group *visorchipset_dev_groups[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) &visorchipset_install_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) &visorchipset_parahotplug_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) * parahotplug_request_kickoff() - initiate parahotplug request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) * @req: the request to initiate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) * Cause uevent to run the user level script to do the disable/enable specified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) * in the parahotplug_request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) static int parahotplug_request_kickoff(struct parahotplug_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) struct controlvm_message_packet *cmd = &req->msg.cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) env_func[40];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) char *envp[] = { env_cmd, env_id, env_state, env_bus, env_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) env_func, NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) sprintf(env_cmd, "VISOR_PARAHOTPLUG=1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) sprintf(env_id, "VISOR_PARAHOTPLUG_ID=%d", req->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) sprintf(env_state, "VISOR_PARAHOTPLUG_STATE=%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) cmd->device_change_state.state.active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) sprintf(env_bus, "VISOR_PARAHOTPLUG_BUS=%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) cmd->device_change_state.bus_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) sprintf(env_dev, "VISOR_PARAHOTPLUG_DEVICE=%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) cmd->device_change_state.dev_no >> 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) sprintf(env_func, "VISOR_PARAHOTPLUG_FUNCTION=%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) cmd->device_change_state.dev_no & 0x7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) return kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) KOBJ_CHANGE, envp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
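/*
 * Example environment (hypothetical bus/device numbers) handed to the udev
 * script by parahotplug_request_kickoff() for bus_no = 1, dev_no = 0x1a on a
 * disable request:
 *
 *   VISOR_PARAHOTPLUG=1
 *   VISOR_PARAHOTPLUG_ID=<req->id>
 *   VISOR_PARAHOTPLUG_STATE=0
 *   VISOR_PARAHOTPLUG_BUS=1
 *   VISOR_PARAHOTPLUG_DEVICE=3     (dev_no >> 3)
 *   VISOR_PARAHOTPLUG_FUNCTION=2   (dev_no & 0x7)
 */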
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) * parahotplug_process_message() - enables or disables a PCI device by kicking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) * off a udev script
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) * @inmsg: the message indicating whether to enable or disable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) static int parahotplug_process_message(struct controlvm_message *inmsg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) struct parahotplug_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) req = parahotplug_request_create(inmsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) if (!req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * For enable messages, just respond with success right away; we don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) * need to wait to see if the enable was successful.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) if (inmsg->cmd.device_change_state.state.active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) err = parahotplug_request_kickoff(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) goto err_respond;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) controlvm_respond(&inmsg->hdr, CONTROLVM_RESP_SUCCESS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) &inmsg->cmd.device_change_state.state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) parahotplug_request_destroy(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) * For disable messages, add the request to the request list before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) * kicking off the udev script. It won't get responded to until the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) * script has indicated it's done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) spin_lock(&parahotplug_request_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) list_add_tail(&req->list, &parahotplug_request_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) spin_unlock(&parahotplug_request_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) err = parahotplug_request_kickoff(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) goto err_respond;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) err_respond:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) controlvm_respond(&inmsg->hdr, err,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) &inmsg->cmd.device_change_state.state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) * chipset_ready_uevent() - sends chipset_ready action
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) * Return: 0 on success, negative on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) static int chipset_ready_uevent(struct controlvm_message_header *msg_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj, KOBJ_ONLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) if (msg_hdr->flags.response_expected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) controlvm_respond(msg_hdr, res, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) * chipset_selftest_uevent() - sends chipset_selftest action
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) * Send a KOBJ_CHANGE uevent (SPARSP_SELFTEST=1) for DEVPATH=/sys/devices/platform/visorchipset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) * Return: 0 on success, negative on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) static int chipset_selftest_uevent(struct controlvm_message_header *msg_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) char env_selftest[20];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) char *envp[] = { env_selftest, NULL };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) res = kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) KOBJ_CHANGE, envp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) if (msg_hdr->flags.response_expected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) controlvm_respond(msg_hdr, res, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) * chipset_notready_uevent() - sends chipset_notready action
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) * Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) * Return: 0 on success, negative on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) static int chipset_notready_uevent(struct controlvm_message_header *msg_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) int res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) KOBJ_OFFLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) if (msg_hdr->flags.response_expected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) controlvm_respond(msg_hdr, res, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) static int unisys_vmcall(unsigned long tuple, unsigned long param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) int result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) unsigned long reg_ebx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) unsigned long reg_ecx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) reg_ebx = param & 0xFFFFFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) reg_ecx = param >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) if (!(cpuid_ecx & 0x80000000))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) return -EPERM;
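/*
 * The .byte sequence 0x0f, 0x01, 0xc1 below encodes the VMCALL instruction;
 * the CPUID check above (leaf 1, ECX bit 31, the hypervisor-present bit)
 * ensures we only issue it when running under a hypervisor.
 */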
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) __asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) "a"(tuple), "b"(reg_ebx), "c"(reg_ecx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) if (result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) /* Convert VMCALL error codes to Linux error codes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) switch (result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) case VMCALL_RESULT_INVALID_PARAM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) case VMCALL_RESULT_DATA_UNAVAILABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) static int controlvm_channel_create(struct visorchipset_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) struct visorchannel *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) u64 addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) err = unisys_vmcall(VMCALL_CONTROLVM_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) virt_to_phys(&dev->controlvm_params));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) addr = dev->controlvm_params.address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) chan = visorchannel_create(addr, GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) &visor_controlvm_channel_guid, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) if (!chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) dev->controlvm_channel = chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) static void setup_crash_devices_work_queue(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) struct controlvm_message local_crash_bus_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) struct controlvm_message local_crash_dev_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) struct controlvm_message msg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) .hdr.id = CONTROLVM_CHIPSET_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) .cmd.init_chipset = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) .bus_count = 23,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) .switch_count = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) u32 local_crash_msg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) u16 local_crash_msg_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) /* send init chipset msg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) chipset_init(&msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) /* get saved message count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (visorchannel_read(chipset_dev->controlvm_channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) offsetof(struct visor_controlvm_channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) saved_crash_message_count),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) &local_crash_msg_count, sizeof(u16)) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) dev_err(&chipset_dev->acpi_device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) "failed to read channel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) dev_err(&chipset_dev->acpi_device->dev, "invalid count\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) /* get saved crash message offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) if (visorchannel_read(chipset_dev->controlvm_channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) offsetof(struct visor_controlvm_channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) saved_crash_message_offset),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) &local_crash_msg_offset, sizeof(u32)) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) dev_err(&chipset_dev->acpi_device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) "failed to read channel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) /* read the saved create_bus message for the storage bus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) if (visorchannel_read(chipset_dev->controlvm_channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) local_crash_msg_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) &local_crash_bus_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) sizeof(struct controlvm_message)) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) dev_err(&chipset_dev->acpi_device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) "failed to read channel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) /* read create device message for storage device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) if (visorchannel_read(chipset_dev->controlvm_channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) local_crash_msg_offset +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) sizeof(struct controlvm_message),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) &local_crash_dev_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) sizeof(struct controlvm_message)) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) dev_err(&chipset_dev->acpi_device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) "failed to read channel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) /* reuse IOVM create bus message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) if (!local_crash_bus_msg.cmd.create_bus.channel_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) dev_err(&chipset_dev->acpi_device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) "no valid create_bus message\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) visorbus_create(&local_crash_bus_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) /* reuse create device message for storage device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) if (!local_crash_dev_msg.cmd.create_device.channel_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) dev_err(&chipset_dev->acpi_device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) "no valid create_device message\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) visorbus_device_create(&local_crash_dev_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) void visorbus_response(struct visor_device *bus_info, int response,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) int controlvm_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) if (!bus_info->pending_msg_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) controlvm_responder(controlvm_id, bus_info->pending_msg_hdr, response);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) kfree(bus_info->pending_msg_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) bus_info->pending_msg_hdr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) void visorbus_device_changestate_response(struct visor_device *dev_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) int response,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) struct visor_segment_state state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) if (!dev_info->pending_msg_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE, dev_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) response, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) kfree(dev_info->pending_msg_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) dev_info->pending_msg_hdr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) static void parser_done(struct parser_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) chipset_dev->controlvm_payload_bytes_buffered -= ctx->param_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) kfree(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) static struct parser_context *parser_init_stream(u64 addr, u32 bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) bool *retry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) unsigned long allocbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) struct parser_context *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) void *mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) *retry = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) /* alloc an extra byte to ensure payload is \0 terminated */
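/*
 * ctx->data is only the parameters header; the payload copied below runs
 * past it, so the allocation is sized as the context struct plus the full
 * payload minus the header already embedded in the struct, plus the NUL.
 */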
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) allocbytes = (unsigned long)bytes + 1 + (sizeof(struct parser_context) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) sizeof(struct visor_controlvm_parameters_header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) if ((chipset_dev->controlvm_payload_bytes_buffered + bytes) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) MAX_CONTROLVM_PAYLOAD_BYTES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) *retry = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) ctx = kzalloc(allocbytes, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) if (!ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) *retry = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) ctx->allocbytes = allocbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) ctx->param_bytes = bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) mapping = memremap(addr, bytes, MEMREMAP_WB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) if (!mapping)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) goto err_finish_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) memcpy(&ctx->data, mapping, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) memunmap(mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) ctx->byte_stream = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) chipset_dev->controlvm_payload_bytes_buffered += ctx->param_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) return ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) err_finish_ctx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) kfree(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) * handle_command() - process a controlvm message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) * @inmsg: the message to process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) * @channel_addr: address of the controlvm channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) * 0 - Successfully processed the message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) * -EAGAIN - ControlVM message was not processed and should be retried
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) * before reading the next controlvm message; a scenario where this can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) * occur is when we need to throttle the allocation of memory in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) * which to copy out controlvm payload data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) * < 0 - error: ControlVM message was processed but an error occurred.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) static int handle_command(struct controlvm_message inmsg, u64 channel_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) struct controlvm_message_packet *cmd = &inmsg.cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) u64 parm_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) u32 parm_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) struct parser_context *parser_ctx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) struct controlvm_message ackmsg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) /* create parsing context if necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) parm_bytes = inmsg.hdr.payload_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) * Parameter and channel addresses within test messages actually lie
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) * within our OS-controlled memory. We need to know that, because it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) * makes a difference in how we compute the virtual address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) if (parm_bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) bool retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) parser_ctx = parser_init_stream(parm_addr, parm_bytes, &retry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) if (!parser_ctx && retry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) controlvm_init_response(&ackmsg, &inmsg.hdr, CONTROLVM_RESP_SUCCESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) err = visorchannel_signalinsert(chipset_dev->controlvm_channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) CONTROLVM_QUEUE_ACK, &ackmsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) switch (inmsg.hdr.id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) case CONTROLVM_CHIPSET_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) err = chipset_init(&inmsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) case CONTROLVM_BUS_CREATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) err = visorbus_create(&inmsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) case CONTROLVM_BUS_DESTROY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) err = visorbus_destroy(&inmsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) case CONTROLVM_BUS_CONFIGURE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) err = visorbus_configure(&inmsg, parser_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) case CONTROLVM_DEVICE_CREATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) err = visorbus_device_create(&inmsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) case CONTROLVM_DEVICE_CHANGESTATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (cmd->device_change_state.flags.phys_device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) err = parahotplug_process_message(&inmsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) * save the hdr and cmd structures for later use when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) * sending back the response to Command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) err = visorbus_device_changestate(&inmsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) case CONTROLVM_DEVICE_DESTROY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) err = visorbus_device_destroy(&inmsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) case CONTROLVM_DEVICE_CONFIGURE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) /* no op: just send a response indicating success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) if (inmsg.hdr.flags.response_expected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) case CONTROLVM_CHIPSET_READY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) err = chipset_ready_uevent(&inmsg.hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) case CONTROLVM_CHIPSET_SELFTEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) err = chipset_selftest_uevent(&inmsg.hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) case CONTROLVM_CHIPSET_STOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) err = chipset_notready_uevent(&inmsg.hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) err = -ENOMSG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) if (inmsg.hdr.flags.response_expected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) controlvm_respond(&inmsg.hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) -CONTROLVM_RESP_ID_UNKNOWN, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) if (parser_ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) parser_done(parser_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) parser_ctx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) * read_controlvm_event() - retrieves the next message from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) * CONTROLVM_QUEUE_EVENT queue in the controlvm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) * channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) * @msg: pointer to the retrieved message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) * Return: 0 if valid message was retrieved or -error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) static int read_controlvm_event(struct controlvm_message *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) int err = visorchannel_signalremove(chipset_dev->controlvm_channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) CONTROLVM_QUEUE_EVENT, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) /* got a message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) if (msg->hdr.flags.test_message == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) * parahotplug_process_list() - remove any request from the list that's been on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) * there too long and respond with an error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) static void parahotplug_process_list(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) struct list_head *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) struct list_head *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) spin_lock(&parahotplug_request_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) list_for_each_safe(pos, tmp, &parahotplug_request_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) struct parahotplug_request *req =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) list_entry(pos, struct parahotplug_request, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) if (!time_after_eq(jiffies, req->expiration))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) list_del(pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) if (req->msg.hdr.flags.response_expected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) controlvm_respond(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) &req->msg.hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) &req->msg.cmd.device_change_state.state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) parahotplug_request_destroy(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) spin_unlock(&parahotplug_request_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) static void controlvm_periodic_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) struct controlvm_message inmsg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) /* Drain the RESPONSE queue until it is empty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) err = visorchannel_signalremove(chipset_dev->controlvm_channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) CONTROLVM_QUEUE_RESPONSE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) &inmsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) } while ((!err) && (++count < CONTROLVM_MESSAGE_MAX));
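/*
 * The drain loop ends either when visorchannel_signalremove() reports
 * -EAGAIN (no more queued responses) or after CONTROLVM_MESSAGE_MAX
 * iterations; anything other than -EAGAIN means we skip event processing
 * for this pass.
 */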
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) if (err != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) goto schedule_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) if (chipset_dev->controlvm_pending_msg_valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) * we throttled processing of a prior msg, so try to process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) * it again rather than reading a new one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) inmsg = chipset_dev->controlvm_pending_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) chipset_dev->controlvm_pending_msg_valid = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) err = read_controlvm_event(&inmsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) while (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) chipset_dev->most_recent_message_jiffies = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) err = handle_command(inmsg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) visorchannel_get_physaddr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) (chipset_dev->controlvm_channel));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) if (err == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) chipset_dev->controlvm_pending_msg = inmsg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) chipset_dev->controlvm_pending_msg_valid = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) err = read_controlvm_event(&inmsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) /* parahotplug_worker: time out any stale parahotplug requests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) parahotplug_process_list();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) * Controlvm messages are sent in bulk. While messages are arriving, we want
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) * the polling to be fast. If no message has been received for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) * MIN_IDLE_SECONDS, we can slow down the polling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) schedule_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) if (time_after(jiffies, chipset_dev->most_recent_message_jiffies +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) (HZ * MIN_IDLE_SECONDS))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) * it's been longer than MIN_IDLE_SECONDS since we processed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) * our last controlvm message; slow down the polling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) if (chipset_dev->poll_jiffies != POLLJIFFIES_CONTROLVM_SLOW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_SLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) if (chipset_dev->poll_jiffies != POLLJIFFIES_CONTROLVM_FAST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) chipset_dev->poll_jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
static int visorchipset_init(struct acpi_device *acpi_device)
{
	int err = -ENOMEM;
	struct visorchannel *controlvm_channel;

	chipset_dev = kzalloc(sizeof(*chipset_dev), GFP_KERNEL);
	if (!chipset_dev)
		goto error;
	err = controlvm_channel_create(chipset_dev);
	if (err)
		goto error_free_chipset_dev;
	acpi_device->driver_data = chipset_dev;
	chipset_dev->acpi_device = acpi_device;
	chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
	err = sysfs_create_groups(&chipset_dev->acpi_device->dev.kobj,
				  visorchipset_dev_groups);
	if (err < 0)
		goto error_destroy_channel;
	controlvm_channel = chipset_dev->controlvm_channel;
	if (!visor_check_channel(visorchannel_get_header(controlvm_channel),
				 &chipset_dev->acpi_device->dev,
				 &visor_controlvm_channel_guid,
				 "controlvm",
				 sizeof(struct visor_controlvm_channel),
				 VISOR_CONTROLVM_CHANNEL_VERSIONID,
				 VISOR_CHANNEL_SIGNATURE)) {
		err = -ENODEV;
		goto error_delete_groups;
	}
	/* if booting in a crash kernel */
	if (is_kdump_kernel())
		INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
				  setup_crash_devices_work_queue);
	else
		INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
				  controlvm_periodic_work);
	chipset_dev->most_recent_message_jiffies = jiffies;
	chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
	schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
			      chipset_dev->poll_jiffies);
	err = visorbus_init();
	if (err < 0)
		goto error_cancel_work;
	return 0;

error_cancel_work:
	cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);

error_delete_groups:
	sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
			    visorchipset_dev_groups);

error_destroy_channel:
	visorchannel_destroy(chipset_dev->controlvm_channel);

error_free_chipset_dev:
	kfree(chipset_dev);

error:
	dev_err(&acpi_device->dev, "failed with error %d\n", err);
	return err;
}

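/*
 * visorchipset_exit() - undo everything done by visorchipset_init()
 * @acpi_device: the ACPI device being removed
 *
 * Tears down visorbus, stops the periodic controlvm worker, removes the
 * sysfs groups, destroys the controlvm channel, and frees the chipset
 * device state.
 */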
static int visorchipset_exit(struct acpi_device *acpi_device)
{
	visorbus_exit();
	cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
	sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
			    visorchipset_dev_groups);
	visorchannel_destroy(chipset_dev->controlvm_channel);
	kfree(chipset_dev);
	return 0;
}

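/*
 * ACPI IDs this driver binds to. The driver is only registered at all when
 * s-Par has been detected (see init_unisys()).
 */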
static const struct acpi_device_id unisys_device_ids[] = {
	{"PNP0A07", 0},
	{"", 0},
};

static struct acpi_driver unisys_acpi_driver = {
	.name = "unisys_acpi",
	.class = "unisys_acpi_class",
	.owner = THIS_MODULE,
	.ids = unisys_device_ids,
	.ops = {
		.add = visorchipset_init,
		.remove = visorchipset_exit,
	},
};

MODULE_DEVICE_TABLE(acpi, unisys_device_ids);

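/*
 * visorutil_spar_detect() - check whether this guest is running under s-Par
 *
 * Only queries the vendor CPUID leaf when the CPU reports a hypervisor.
 *
 * Return: nonzero when the s-Par signature is found, 0 otherwise.
 */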
static __init int visorutil_spar_detect(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		/* check the ID */
		cpuid(UNISYS_VISOR_LEAF_ID, &eax, &ebx, &ecx, &edx);
		return (ebx == UNISYS_VISOR_ID_EBX) &&
		       (ecx == UNISYS_VISOR_ID_ECX) &&
		       (edx == UNISYS_VISOR_ID_EDX);
	}
	return 0;
}

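/*
 * init_unisys() - module entry point
 *
 * Registers the ACPI driver only when an s-Par platform is detected;
 * otherwise module load fails with -ENODEV.
 */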
static int __init init_unisys(void)
{
	int result;

	if (!visorutil_spar_detect())
		return -ENODEV;
	result = acpi_bus_register_driver(&unisys_acpi_driver);
	if (result)
		return -ENODEV;
	pr_info("Unisys Visorchipset Driver Loaded.\n");
	return 0;
}

static void __exit exit_unisys(void)
{
	acpi_bus_unregister_driver(&unisys_acpi_driver);
}

module_init(init_unisys);
module_exit(exit_unisys);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("s-Par visorbus driver for virtual device buses");