Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. All rights reserved. */
#include <linux/libnvdimm.h>
#include <linux/ndctl.h>
#include <linux/acpi.h>
#include <asm/smp.h>
#include "intel.h"
#include "nfit.h"

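/*
 * The firmware_activate_noidle_show()/_store() pair below backs the
 * "firmware_activate_noidle" attribute declared via DEVICE_ATTR_RW()
 * after the store helper.  Writing any kstrtobool()-accepted value
 * (e.g. "echo 1 > firmware_activate_noidle" under the nvdimm bus
 * device in sysfs; the exact path is assumed here and depends on how
 * the NFIT core exposes the attribute group) flips
 * acpi_desc->fwa_noidle and, when the value changes, invalidates the
 * cached firmware-activate capability so it is re-queried from
 * platform firmware.
 */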
static ssize_t firmware_activate_noidle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%s\n", acpi_desc->fwa_noidle ? "Y" : "N");
}

static ssize_t firmware_activate_noidle_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	ssize_t rc;
	bool val;

	rc = kstrtobool(buf, &val);
	if (rc)
		return rc;
	if (val != acpi_desc->fwa_noidle)
		acpi_desc->fwa_cap = NVDIMM_FWA_CAP_INVALID;
	acpi_desc->fwa_noidle = val;
	return size;
}
DEVICE_ATTR_RW(firmware_activate_noidle);

bool intel_fwa_supported(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	unsigned long *mask;

	if (!test_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask))
		return false;

	mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL];
	return *mask == NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK;
}

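/*
 * All of the DSM helpers that follow share one marshaling idiom: an
 * anonymous struct pairs a struct nd_cmd_pkg header (family,
 * sub-command, input/output payload sizes and the firmware-visible
 * output size) with the Intel-specific payload struct, and the whole
 * envelope is submitted as a single ND_CMD_CALL -- via nvdimm_ctl()
 * for per-DIMM commands or nd_desc->ndctl() for bus-level commands.
 * Most per-DIMM helpers first check the discovered dsm_mask and bail
 * out with -ENOTTY when the command is unsupported (the security
 * flags query returns 0 instead).
 */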
static unsigned long intel_security_flags(struct nvdimm *nvdimm,
		enum nvdimm_passphrase_type ptype)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	unsigned long security_flags = 0;
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_get_security_state cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_GET_SECURITY_STATE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_out =
				sizeof(struct nd_intel_get_security_state),
			.nd_fw_size =
				sizeof(struct nd_intel_get_security_state),
		},
	};
	int rc;

	if (!test_bit(NVDIMM_INTEL_GET_SECURITY_STATE, &nfit_mem->dsm_mask))
		return 0;

	/*
	 * Short circuit the state retrieval while we are doing overwrite.
	 * The DSM spec states that the security state is indeterminate
	 * until the overwrite DSM completes.
	 */
	if (nvdimm_in_overwrite(nvdimm) && ptype == NVDIMM_USER)
		return BIT(NVDIMM_SECURITY_OVERWRITE);

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0 || nd_cmd.cmd.status) {
		pr_err("%s: security state retrieval failed (%d:%#x)\n",
				nvdimm_name(nvdimm), rc, nd_cmd.cmd.status);
		return 0;
	}

	/* check and see if security is enabled and locked */
	if (ptype == NVDIMM_MASTER) {
		if (nd_cmd.cmd.extended_state & ND_INTEL_SEC_ESTATE_ENABLED)
			set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
		else
			set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);
		if (nd_cmd.cmd.extended_state & ND_INTEL_SEC_ESTATE_PLIMIT)
			set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);
		return security_flags;
	}

	if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_UNSUPPORTED)
		return 0;

	if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_ENABLED) {
		if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_FROZEN ||
		    nd_cmd.cmd.state & ND_INTEL_SEC_STATE_PLIMIT)
			set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);

		if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_LOCKED)
			set_bit(NVDIMM_SECURITY_LOCKED, &security_flags);
		else
			set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
	} else
		set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);

	return security_flags;
}

static int intel_security_freeze(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_freeze_lock cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_FREEZE_LOCK,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};
	int rc;

	if (!test_bit(NVDIMM_INTEL_FREEZE_LOCK, &nfit_mem->dsm_mask))
		return -ENOTTY;

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;
	if (nd_cmd.cmd.status)
		return -EIO;
	return 0;
}

static int intel_security_change_key(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *old_data,
		const struct nvdimm_key_data *new_data,
		enum nvdimm_passphrase_type ptype)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	unsigned int cmd = ptype == NVDIMM_MASTER ?
		NVDIMM_INTEL_SET_MASTER_PASSPHRASE :
		NVDIMM_INTEL_SET_PASSPHRASE;
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_set_passphrase cmd;
	} nd_cmd = {
		.pkg = {
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE * 2,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
			.nd_command = cmd,
		},
	};
	int rc;

	if (!test_bit(cmd, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.old_pass, old_data->data,
			sizeof(nd_cmd.cmd.old_pass));
	memcpy(nd_cmd.cmd.new_pass, new_data->data,
			sizeof(nd_cmd.cmd.new_pass));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		return 0;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -EIO;
	}
}

static void nvdimm_invalidate_cache(void);

static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *key_data)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_unlock_unit cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_UNLOCK_UNIT,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};
	int rc;

	if (!test_bit(NVDIMM_INTEL_UNLOCK_UNIT, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.passphrase, key_data->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;
	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	default:
		return -EIO;
	}

	/* DIMM unlocked, invalidate all CPU caches before we read it */
	nvdimm_invalidate_cache();

	return 0;
}

static int intel_security_disable(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *key_data)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_disable_passphrase cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_DISABLE_PASSPHRASE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};

	if (!test_bit(NVDIMM_INTEL_DISABLE_PASSPHRASE, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.passphrase, key_data->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -ENXIO;
	}

	return 0;
}

static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *key,
		enum nvdimm_passphrase_type ptype)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	unsigned int cmd = ptype == NVDIMM_MASTER ?
		NVDIMM_INTEL_MASTER_SECURE_ERASE : NVDIMM_INTEL_SECURE_ERASE;
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_secure_erase cmd;
	} nd_cmd = {
		.pkg = {
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
			.nd_command = cmd,
		},
	};

	if (!test_bit(cmd, &nfit_mem->dsm_mask))
		return -ENOTTY;

	/* flush all cache before we erase DIMM */
	nvdimm_invalidate_cache();
	memcpy(nd_cmd.cmd.passphrase, key->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -ENXIO;
	}

	/* DIMM erased, invalidate all CPU caches before we read it */
	nvdimm_invalidate_cache();
	return 0;
}

static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_query_overwrite cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_QUERY_OVERWRITE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};

	if (!test_bit(NVDIMM_INTEL_QUERY_OVERWRITE, &nfit_mem->dsm_mask))
		return -ENOTTY;

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_OQUERY_INPROGRESS:
		return -EBUSY;
	default:
		return -ENXIO;
	}

	/* flush all cache before we make the nvdimms available */
	nvdimm_invalidate_cache();
	return 0;
}

static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *nkey)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_overwrite cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_OVERWRITE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};

	if (!test_bit(NVDIMM_INTEL_OVERWRITE, &nfit_mem->dsm_mask))
		return -ENOTTY;

	/* flush all cache before we erase DIMM */
	nvdimm_invalidate_cache();
	memcpy(nd_cmd.cmd.passphrase, nkey->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		return 0;
	case ND_INTEL_STATUS_OVERWRITE_UNSUPPORTED:
		return -ENOTSUPP;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -ENXIO;
	}
}

/*
 * TODO: define a cross arch wbinvd equivalent when/if
 * NVDIMM_FAMILY_INTEL command support arrives on another arch.
 */
#ifdef CONFIG_X86
static void nvdimm_invalidate_cache(void)
{
	wbinvd_on_all_cpus();
}
#else
static void nvdimm_invalidate_cache(void)
{
	WARN_ON_ONCE("cache invalidation required after unlock\n");
}
#endif

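/*
 * nvdimm_invalidate_cache() above is only a real cache flush on x86
 * (wbinvd_on_all_cpus()); on other architectures it is a WARN stub.
 * That is why the unlock, erase, overwrite and query_overwrite
 * handlers are marked __maybe_unused and are only wired into the ops
 * table below under CONFIG_X86.
 */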
static const struct nvdimm_security_ops __intel_security_ops = {
	.get_flags = intel_security_flags,
	.freeze = intel_security_freeze,
	.change_key = intel_security_change_key,
	.disable = intel_security_disable,
#ifdef CONFIG_X86
	.unlock = intel_security_unlock,
	.erase = intel_security_erase,
	.overwrite = intel_security_overwrite,
	.query_overwrite = intel_security_query_overwrite,
#endif
};

const struct nvdimm_security_ops *intel_security_ops = &__intel_security_ops;

static int intel_bus_fwa_businfo(struct nvdimm_bus_descriptor *nd_desc,
		struct nd_intel_bus_fw_activate_businfo *info)
{
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_bus_fw_activate_businfo cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO,
			.nd_family = NVDIMM_BUS_FAMILY_INTEL,
			.nd_size_out =
				sizeof(struct nd_intel_bus_fw_activate_businfo),
			.nd_fw_size =
				sizeof(struct nd_intel_bus_fw_activate_businfo),
		},
	};
	int rc;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd),
			NULL);
	*info = nd_cmd.cmd;
	return rc;
}

/* The fw_ops expect to be called with the nvdimm_bus_lock() held */
static enum nvdimm_fwa_state intel_bus_fwa_state(
		struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_intel_bus_fw_activate_businfo info;
	struct device *dev = acpi_desc->dev;
	enum nvdimm_fwa_state state;
	int rc;

	/*
	 * It should not be possible for platform firmware to return
	 * busy because activate is a synchronous operation. Treat it
	 * similar to invalid, i.e. always refresh / poll the status.
	 */
	switch (acpi_desc->fwa_state) {
	case NVDIMM_FWA_INVALID:
	case NVDIMM_FWA_BUSY:
		break;
	default:
		/* check if capability needs to be refreshed */
		if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID)
			break;
		return acpi_desc->fwa_state;
	}

	/* Refresh with platform firmware */
	rc = intel_bus_fwa_businfo(nd_desc, &info);
	if (rc)
		return NVDIMM_FWA_INVALID;

	switch (info.state) {
	case ND_INTEL_FWA_IDLE:
		state = NVDIMM_FWA_IDLE;
		break;
	case ND_INTEL_FWA_BUSY:
		state = NVDIMM_FWA_BUSY;
		break;
	case ND_INTEL_FWA_ARMED:
		if (info.activate_tmo > info.max_quiesce_tmo)
			state = NVDIMM_FWA_ARM_OVERFLOW;
		else
			state = NVDIMM_FWA_ARMED;
		break;
	default:
		dev_err_once(dev, "invalid firmware activate state %d\n",
				info.state);
		return NVDIMM_FWA_INVALID;
	}

	/*
	 * Capability data is available in the same payload as state. It
	 * is expected to be static.
	 */
	if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID) {
		if (info.capability & ND_INTEL_BUS_FWA_CAP_FWQUIESCE)
			acpi_desc->fwa_cap = NVDIMM_FWA_CAP_QUIESCE;
		else if (info.capability & ND_INTEL_BUS_FWA_CAP_OSQUIESCE) {
			/*
			 * Skip hibernate cycle by default if platform
			 * indicates that it does not need devices to be
			 * quiesced.
			 */
			acpi_desc->fwa_cap = NVDIMM_FWA_CAP_LIVE;
		} else
			acpi_desc->fwa_cap = NVDIMM_FWA_CAP_NONE;
	}

	acpi_desc->fwa_state = state;

	return state;
}

static enum nvdimm_fwa_capability intel_bus_fwa_capability(
		struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	if (acpi_desc->fwa_cap > NVDIMM_FWA_CAP_INVALID)
		return acpi_desc->fwa_cap;

	if (intel_bus_fwa_state(nd_desc) > NVDIMM_FWA_INVALID)
		return acpi_desc->fwa_cap;

	return NVDIMM_FWA_CAP_INVALID;
}

static int intel_bus_fwa_activate(struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_bus_fw_activate cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE,
			.nd_family = NVDIMM_BUS_FAMILY_INTEL,
			.nd_size_in = sizeof(nd_cmd.cmd.iodev_state),
			.nd_size_out =
				sizeof(struct nd_intel_bus_fw_activate),
			.nd_fw_size =
				sizeof(struct nd_intel_bus_fw_activate),
		},
		/*
		 * Even though activate is run from a suspended context,
		 * for safety, still ask platform firmware to force
		 * quiesce devices by default. Let a module
		 * parameter override that policy.
		 */
		.cmd = {
			.iodev_state = acpi_desc->fwa_noidle
				? ND_INTEL_BUS_FWA_IODEV_OS_IDLE
				: ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE,
		},
	};
	int rc;

	switch (intel_bus_fwa_state(nd_desc)) {
	case NVDIMM_FWA_ARMED:
	case NVDIMM_FWA_ARM_OVERFLOW:
		break;
	default:
		return -ENXIO;
	}

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd),
			NULL);

	/*
	 * Whether the command succeeded, or failed, the agent checking
	 * for the result needs to query the DIMMs individually.
	 * Increment the activation count to invalidate all the DIMM
	 * states at once (it's otherwise not possible to take
	 * acpi_desc->init_mutex in this context)
	 */
	acpi_desc->fwa_state = NVDIMM_FWA_INVALID;
	acpi_desc->fwa_count++;

	dev_dbg(acpi_desc->dev, "result: %d\n", rc);

	return rc;
}

static const struct nvdimm_bus_fw_ops __intel_bus_fw_ops = {
	.activate_state = intel_bus_fwa_state,
	.capability = intel_bus_fwa_capability,
	.activate = intel_bus_fwa_activate,
};

const struct nvdimm_bus_fw_ops *intel_bus_fw_ops = &__intel_bus_fw_ops;

static int intel_fwa_dimminfo(struct nvdimm *nvdimm,
		struct nd_intel_fw_activate_dimminfo *info)
{
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_fw_activate_dimminfo cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_out =
				sizeof(struct nd_intel_fw_activate_dimminfo),
			.nd_fw_size =
				sizeof(struct nd_intel_fw_activate_dimminfo),
		},
	};
	int rc;

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	*info = nd_cmd.cmd;
	return rc;
}

static enum nvdimm_fwa_state intel_fwa_state(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
	struct nd_intel_fw_activate_dimminfo info;
	int rc;

	/*
	 * Similar to the bus state, since activate is synchronous the
	 * busy state should resolve within the context of 'activate'.
	 */
	switch (nfit_mem->fwa_state) {
	case NVDIMM_FWA_INVALID:
	case NVDIMM_FWA_BUSY:
		break;
	default:
		/* If no activations occurred the old state is still valid */
		if (nfit_mem->fwa_count == acpi_desc->fwa_count)
			return nfit_mem->fwa_state;
	}

	rc = intel_fwa_dimminfo(nvdimm, &info);
	if (rc)
		return NVDIMM_FWA_INVALID;

	switch (info.state) {
	case ND_INTEL_FWA_IDLE:
		nfit_mem->fwa_state = NVDIMM_FWA_IDLE;
		break;
	case ND_INTEL_FWA_BUSY:
		nfit_mem->fwa_state = NVDIMM_FWA_BUSY;
		break;
	case ND_INTEL_FWA_ARMED:
		nfit_mem->fwa_state = NVDIMM_FWA_ARMED;
		break;
	default:
		nfit_mem->fwa_state = NVDIMM_FWA_INVALID;
		break;
	}

	switch (info.result) {
	case ND_INTEL_DIMM_FWA_NONE:
		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NONE;
		break;
	case ND_INTEL_DIMM_FWA_SUCCESS:
		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_SUCCESS;
		break;
	case ND_INTEL_DIMM_FWA_NOTSTAGED:
		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NOTSTAGED;
		break;
	case ND_INTEL_DIMM_FWA_NEEDRESET:
		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NEEDRESET;
		break;
	case ND_INTEL_DIMM_FWA_MEDIAFAILED:
	case ND_INTEL_DIMM_FWA_ABORT:
	case ND_INTEL_DIMM_FWA_NOTSUPP:
	case ND_INTEL_DIMM_FWA_ERROR:
	default:
		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_FAIL;
		break;
	}

	nfit_mem->fwa_count = acpi_desc->fwa_count;

	return nfit_mem->fwa_state;
}

static enum nvdimm_fwa_result intel_fwa_result(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;

	if (nfit_mem->fwa_count == acpi_desc->fwa_count
			&& nfit_mem->fwa_result > NVDIMM_FWA_RESULT_INVALID)
		return nfit_mem->fwa_result;

	if (intel_fwa_state(nvdimm) > NVDIMM_FWA_INVALID)
		return nfit_mem->fwa_result;

	return NVDIMM_FWA_RESULT_INVALID;
}

static int intel_fwa_arm(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_fw_activate_arm cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_FW_ACTIVATE_ARM,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = sizeof(nd_cmd.cmd.activate_arm),
			.nd_size_out =
				sizeof(struct nd_intel_fw_activate_arm),
			.nd_fw_size =
				sizeof(struct nd_intel_fw_activate_arm),
		},
		.cmd = {
			.activate_arm = arm == NVDIMM_FWA_ARM
				? ND_INTEL_DIMM_FWA_ARM
				: ND_INTEL_DIMM_FWA_DISARM,
		},
	};
	int rc;

	switch (intel_fwa_state(nvdimm)) {
	case NVDIMM_FWA_INVALID:
		return -ENXIO;
	case NVDIMM_FWA_BUSY:
		return -EBUSY;
	case NVDIMM_FWA_IDLE:
		if (arm == NVDIMM_FWA_DISARM)
			return 0;
		break;
	case NVDIMM_FWA_ARMED:
		if (arm == NVDIMM_FWA_ARM)
			return 0;
		break;
	default:
		return -ENXIO;
	}

	/*
	 * Invalidate the bus-level state, now that we're committed to
	 * changing the 'arm' state.
	 */
	acpi_desc->fwa_state = NVDIMM_FWA_INVALID;
	nfit_mem->fwa_state = NVDIMM_FWA_INVALID;

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);

	dev_dbg(acpi_desc->dev, "%s result: %d\n", arm == NVDIMM_FWA_ARM
			? "arm" : "disarm", rc);
	return rc;
}

static const struct nvdimm_fw_ops __intel_fw_ops = {
	.activate_state = intel_fwa_state,
	.activate_result = intel_fwa_result,
	.arm = intel_fwa_arm,
};

const struct nvdimm_fw_ops *intel_fw_ops = &__intel_fw_ops;
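/*
 * The three exported tables -- intel_security_ops, intel_bus_fw_ops
 * and intel_fw_ops -- are the interface the rest of the NFIT driver
 * uses; they are presumably attached by the NFIT core when it
 * registers the nvdimm bus and DIMMs and finds the Intel family DSMs
 * available (see intel_fwa_supported() and the dsm_mask checks above).
 */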