Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * PCI support in ACPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Copyright (C) 2005 David Shaohua Li <shaohua.li@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * Copyright (C) 2004 Tom Long Nguyen <tom.l.nguyen@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  * Copyright (C) 2004 Intel Corp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/irqdomain.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/msi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/pci_hotplug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/pci-acpi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/pm_runtime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/pm_qos.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include "pci.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) 
/*
 * The GUID is defined in the PCI Firmware Specification available here:
 * https://www.pcisig.com/members/downloads/pcifw_r3_1_13Dec10.pdf
 *
 * Shared by all PCI _DSM (Device Specific Method) evaluations in this file.
 */
const guid_t pci_acpi_dsm_guid =
	GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a,
		  0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) 	struct device *dev = &adev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) 	struct resource_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) 	struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) 	INIT_LIST_HEAD(&list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) 	flags = IORESOURCE_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) 	ret = acpi_dev_get_resources(adev, &list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) 				     acpi_dev_filter_resource_type_cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) 				     (void *) flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) 		dev_err(dev, "failed to parse _CRS method, error code %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) 			ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) 	if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) 		dev_err(dev, "no IO and memory resources present in _CRS\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) 	entry = list_first_entry(&list, struct resource_entry, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) 	*res = *entry->res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) 	acpi_dev_free_resource_list(&list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) static acpi_status acpi_match_rc(acpi_handle handle, u32 lvl, void *context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) 				 void **retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 	u16 *segment = context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 	unsigned long long uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) 	acpi_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 	status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 	if (ACPI_FAILURE(status) || uid != *segment)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 		return AE_CTRL_DEPTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 	*(acpi_handle *)retval = handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 	return AE_CTRL_TERMINATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 			  struct resource *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) 	struct acpi_device *adev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) 	acpi_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) 	acpi_handle handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 	status = acpi_get_devices(hid, acpi_match_rc, &segment, &handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 	if (ACPI_FAILURE(status)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 		dev_err(dev, "can't find _HID %s device to locate resources\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 			hid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 	ret = acpi_bus_get_device(handle, &adev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 	ret = acpi_get_rc_addr(adev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) 		dev_err(dev, "can't get resource from %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 			dev_name(&adev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 	acpi_status status = AE_NOT_EXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 	unsigned long long mcfg_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 	if (handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 		status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 					       NULL, &mcfg_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 	if (ACPI_FAILURE(status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 	return (phys_addr_t)mcfg_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) /* _HPX PCI Setting Record (Type 0); same as _HPP */
/* _HPX PCI Setting Record (Type 0); same as _HPP */
struct hpx_type0 {
	u32 revision;		/* Not present in _HPP */
	u8  cache_line_size;	/* Not applicable to PCIe */
	u8  latency_timer;	/* Not applicable to PCIe */
	u8  enable_serr;	/* non-zero: set PCI_COMMAND_SERR */
	u8  enable_perr;	/* non-zero: set PCI_COMMAND_PARITY */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) 
/*
 * Fallback values used by program_hpx_type0() when firmware supplies no
 * Type 0 record (hpx == NULL) or one with an unsupported revision.
 */
static struct hpx_type0 pci_default_type0 = {
	.revision = 1,
	.cache_line_size = 8,
	.latency_timer = 0x40,
	.enable_serr = 0,
	.enable_perr = 0,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 
/*
 * program_hpx_type0 - apply a Type 0 (_HPP-compatible) setting record
 * @dev: device being configured
 * @hpx: record to apply; NULL selects pci_default_type0
 *
 * Writes the cache line size and latency timer, optionally enables SERR
 * and parity response in PCI_COMMAND, and for PCI-to-PCI bridges also
 * programs the secondary latency timer and bridge-control parity bit.
 */
static void program_hpx_type0(struct pci_dev *dev, struct hpx_type0 *hpx)
{
	u16 pci_cmd, pci_bctl;

	if (!hpx)
		hpx = &pci_default_type0;

	/* Only revision 1 is understood; fall back to the defaults */
	if (hpx->revision > 1) {
		pci_warn(dev, "PCI settings rev %d not supported; using defaults\n",
			 hpx->revision);
		hpx = &pci_default_type0;
	}

	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpx->cache_line_size);
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpx->latency_timer);
	/* Read-modify-write PCI_COMMAND so unrelated bits are preserved */
	pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
	if (hpx->enable_serr)
		pci_cmd |= PCI_COMMAND_SERR;
	if (hpx->enable_perr)
		pci_cmd |= PCI_COMMAND_PARITY;
	pci_write_config_word(dev, PCI_COMMAND, pci_cmd);

	/* Program bridge control value */
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
				      hpx->latency_timer);
		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
		if (hpx->enable_perr)
			pci_bctl |= PCI_BRIDGE_CTL_PARITY;
		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) static acpi_status decode_type0_hpx_record(union acpi_object *record,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 					   struct hpx_type0 *hpx0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 	union acpi_object *fields = record->package.elements;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 	u32 revision = fields[1].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 	switch (revision) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 		if (record->package.count != 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 			return AE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 		for (i = 2; i < 6; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 			if (fields[i].type != ACPI_TYPE_INTEGER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 				return AE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 		hpx0->revision        = revision;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 		hpx0->cache_line_size = fields[2].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 		hpx0->latency_timer   = fields[3].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 		hpx0->enable_serr     = fields[4].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 		hpx0->enable_perr     = fields[5].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 		pr_warn("%s: Type 0 Revision %d record not supported\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 		       __func__, revision);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 		return AE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 	return AE_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 
/*
 * _HPX PCI-X Setting Record (Type 1).  The fields are decoded from the
 * ACPI package but never applied: program_hpx_type1() only warns,
 * because PCI-X setting support is not implemented.
 */
struct hpx_type1 {
	u32 revision;
	u8  max_mem_read;
	u8  avg_max_split;
	u16 tot_max_split;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) static void program_hpx_type1(struct pci_dev *dev, struct hpx_type1 *hpx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 	int pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 	if (!hpx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 	if (!pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 	pci_warn(dev, "PCI-X settings not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) static acpi_status decode_type1_hpx_record(union acpi_object *record,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 					   struct hpx_type1 *hpx1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 	union acpi_object *fields = record->package.elements;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 	u32 revision = fields[1].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 	switch (revision) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 		if (record->package.count != 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 			return AE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 		for (i = 2; i < 5; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 			if (fields[i].type != ACPI_TYPE_INTEGER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 				return AE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 		hpx1->revision      = revision;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 		hpx1->max_mem_read  = fields[2].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 		hpx1->avg_max_split = fields[3].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 		hpx1->tot_max_split = fields[4].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 		pr_warn("%s: Type 1 Revision %d record not supported\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 		       __func__, revision);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 		return AE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 	return AE_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) static bool pcie_root_rcb_set(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 	struct pci_dev *rp = pcie_find_root_port(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 	u16 lnkctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 	if (!rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 	pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 	if (lnkctl & PCI_EXP_LNKCTL_RCB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 
/*
 * _HPX PCI Express Setting Record (Type 2).
 *
 * program_hpx_type2() updates each targeted register as
 * (value & *_and) | *_or.  The sec_unc_err_* fields are decoded but not
 * yet applied (see the FIXME at the end of program_hpx_type2()).
 */
struct hpx_type2 {
	u32 revision;
	u32 unc_err_mask_and;
	u32 unc_err_mask_or;
	u32 unc_err_sever_and;
	u32 unc_err_sever_or;
	u32 cor_err_mask_and;
	u32 cor_err_mask_or;
	u32 adv_err_cap_and;
	u32 adv_err_cap_or;
	u16 pci_exp_devctl_and;
	u16 pci_exp_devctl_or;
	u16 pci_exp_lnkctl_and;
	u16 pci_exp_lnkctl_or;
	u32 sec_unc_err_sever_and;
	u32 sec_unc_err_sever_or;
	u32 sec_unc_err_mask_and;
	u32 sec_unc_err_mask_or;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 
/*
 * program_hpx_type2 - apply a Type 2 (PCI Express) setting record
 * @dev: PCIe device being configured
 * @hpx: record to apply; NULL or a non-PCIe device is a no-op
 *
 * Programs Device Control, Link Control and (when the device has an AER
 * capability) the AER mask/severity/capability registers using the
 * record's and/or masks.  MPS, MRRS and RCB policy is owned by the
 * kernel, so those bits of the record are overridden below.
 */
static void program_hpx_type2(struct pci_dev *dev, struct hpx_type2 *hpx)
{
	int pos;
	u32 reg32;

	if (!hpx)
		return;

	if (!pci_is_pcie(dev))
		return;

	/* Only revision 1 records are understood */
	if (hpx->revision > 1) {
		pci_warn(dev, "PCIe settings rev %d not supported\n",
			 hpx->revision);
		return;
	}

	/*
	 * Don't allow _HPX to change MPS or MRRS settings.  We manage
	 * those to make sure they're consistent with the rest of the
	 * platform.
	 */
	hpx->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ;
	hpx->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ);

	/* Initialize Device Control Register */
	pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
			~hpx->pci_exp_devctl_and, hpx->pci_exp_devctl_or);

	/* Initialize Link Control Register */
	if (pcie_cap_has_lnkctl(dev)) {

		/*
		 * If the Root Port supports Read Completion Boundary of
		 * 128, set RCB to 128.  Otherwise, clear it.
		 */
		hpx->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
		hpx->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
		if (pcie_root_rcb_set(dev))
			hpx->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;

		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
			~hpx->pci_exp_lnkctl_and, hpx->pci_exp_lnkctl_or);
	}

	/* Find Advanced Error Reporting Enhanced Capability */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	/* Initialize Uncorrectable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
	reg32 = (reg32 & hpx->unc_err_mask_and) | hpx->unc_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);

	/* Initialize Uncorrectable Error Severity Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
	reg32 = (reg32 & hpx->unc_err_sever_and) | hpx->unc_err_sever_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);

	/* Initialize Correctable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
	reg32 = (reg32 & hpx->cor_err_mask_and) | hpx->cor_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);

	/* Initialize Advanced Error Capabilities and Control Register */
	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	reg32 = (reg32 & hpx->adv_err_cap_and) | hpx->adv_err_cap_or;

	/* Don't enable ECRC generation or checking if unsupported */
	if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
		reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
	if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
		reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	/*
	 * FIXME: The following two registers are not supported yet.
	 *
	 *   o Secondary Uncorrectable Error Severity Register
	 *   o Secondary Uncorrectable Error Mask Register
	 */
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) static acpi_status decode_type2_hpx_record(union acpi_object *record,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 					   struct hpx_type2 *hpx2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 	union acpi_object *fields = record->package.elements;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 	u32 revision = fields[1].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 	switch (revision) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 		if (record->package.count != 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 			return AE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 		for (i = 2; i < 18; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 			if (fields[i].type != ACPI_TYPE_INTEGER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 				return AE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 		hpx2->revision      = revision;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 		hpx2->unc_err_mask_and      = fields[2].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 		hpx2->unc_err_mask_or       = fields[3].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 		hpx2->unc_err_sever_and     = fields[4].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 		hpx2->unc_err_sever_or      = fields[5].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 		hpx2->cor_err_mask_and      = fields[6].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 		hpx2->cor_err_mask_or       = fields[7].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 		hpx2->adv_err_cap_and       = fields[8].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 		hpx2->adv_err_cap_or        = fields[9].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 		hpx2->pci_exp_devctl_and    = fields[10].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 		hpx2->pci_exp_devctl_or     = fields[11].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 		hpx2->pci_exp_lnkctl_and    = fields[12].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 		hpx2->pci_exp_lnkctl_or     = fields[13].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 		hpx2->sec_unc_err_sever_and = fields[14].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 		hpx2->sec_unc_err_sever_or  = fields[15].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 		hpx2->sec_unc_err_mask_and  = fields[16].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 		hpx2->sec_unc_err_mask_or   = fields[17].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 		pr_warn("%s: Type 2 Revision %d record not supported\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 		       __func__, revision);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 		return AE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 	return AE_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 
/*
 * _HPX PCI Express Setting Record (Type 3).
 *
 * NOTE(review): the code that matches and applies these fields is not
 * in this chunk; from the field names this appears to describe a masked
 * register update (reg_offset/reg_mask_and/reg_mask_or) gated on device
 * type and capability matching criteria — confirm against the PCI
 * Firmware Specification's _HPX Type 3 definition.
 */
struct hpx_type3 {
	u16 device_type;
	u16 function_type;
	u16 config_space_location;
	u16 pci_exp_cap_id;
	u16 pci_exp_cap_ver;
	u16 pci_exp_vendor_id;
	u16 dvsec_id;
	u16 dvsec_rev;
	u16 match_offset;
	u32 match_mask_and;
	u32 match_value;
	u16 reg_offset;
	u32 reg_mask_and;
	u32 reg_mask_or;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 
/*
 * Bitmask of PCIe device/port types an _HPX Type 3 record may apply to;
 * hpx3_device_type() maps a device's PCI_EXP_TYPE_* value to one of
 * these bits.
 */
enum hpx_type3_dev_type {
	HPX_TYPE_ENDPOINT	= BIT(0),
	HPX_TYPE_LEG_END	= BIT(1),
	HPX_TYPE_RC_END		= BIT(2),
	HPX_TYPE_RC_EC		= BIT(3),
	HPX_TYPE_ROOT_PORT	= BIT(4),
	HPX_TYPE_UPSTREAM	= BIT(5),
	HPX_TYPE_DOWNSTREAM	= BIT(6),
	HPX_TYPE_PCI_BRIDGE	= BIT(7),
	HPX_TYPE_PCIE_BRIDGE	= BIT(8),
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) static u16 hpx3_device_type(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 	u16 pcie_type = pci_pcie_type(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 	static const int pcie_to_hpx3_type[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 		[PCI_EXP_TYPE_ENDPOINT]    = HPX_TYPE_ENDPOINT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 		[PCI_EXP_TYPE_LEG_END]     = HPX_TYPE_LEG_END,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 		[PCI_EXP_TYPE_RC_END]      = HPX_TYPE_RC_END,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 		[PCI_EXP_TYPE_RC_EC]       = HPX_TYPE_RC_EC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 		[PCI_EXP_TYPE_ROOT_PORT]   = HPX_TYPE_ROOT_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 		[PCI_EXP_TYPE_UPSTREAM]    = HPX_TYPE_UPSTREAM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 		[PCI_EXP_TYPE_DOWNSTREAM]  = HPX_TYPE_DOWNSTREAM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 		[PCI_EXP_TYPE_PCI_BRIDGE]  = HPX_TYPE_PCI_BRIDGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 		[PCI_EXP_TYPE_PCIE_BRIDGE] = HPX_TYPE_PCIE_BRIDGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 	if (pcie_type >= ARRAY_SIZE(pcie_to_hpx3_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 	return pcie_to_hpx3_type[pcie_type];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) enum hpx_type3_fn_type {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	HPX_FN_NORMAL		= BIT(0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 	HPX_FN_SRIOV_PHYS	= BIT(1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	HPX_FN_SRIOV_VIRT	= BIT(2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) static u8 hpx3_function_type(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	if (dev->is_virtfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 		return HPX_FN_SRIOV_VIRT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	else if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 		return HPX_FN_SRIOV_PHYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 		return HPX_FN_NORMAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) static bool hpx3_cap_ver_matches(u8 pcie_cap_id, u8 hpx3_cap_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	u8 cap_ver = hpx3_cap_id & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	if ((hpx3_cap_id & BIT(4)) && cap_ver >= pcie_cap_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 	else if (cap_ver == pcie_cap_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) enum hpx_type3_cfg_loc {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	HPX_CFG_PCICFG		= 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	HPX_CFG_PCIE_CAP	= 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	HPX_CFG_PCIE_CAP_EXT	= 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 	HPX_CFG_VEND_CAP	= 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 	HPX_CFG_DVSEC		= 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 	HPX_CFG_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) static void program_hpx_type3_register(struct pci_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 				       const struct hpx_type3 *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	u32 match_reg, write_reg, header, orig_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	u16 pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	if (!(hpx3_device_type(dev) & reg->device_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	if (!(hpx3_function_type(dev) & reg->function_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	switch (reg->config_space_location) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	case HPX_CFG_PCICFG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 		pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	case HPX_CFG_PCIE_CAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 		pos = pci_find_capability(dev, reg->pci_exp_cap_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 		if (pos == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	case HPX_CFG_PCIE_CAP_EXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 		pos = pci_find_ext_capability(dev, reg->pci_exp_cap_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 		if (pos == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 		pci_read_config_dword(dev, pos, &header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 		if (!hpx3_cap_ver_matches(PCI_EXT_CAP_VER(header),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 					  reg->pci_exp_cap_ver))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	case HPX_CFG_VEND_CAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	case HPX_CFG_DVSEC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 		pci_warn(dev, "Encountered _HPX type 3 with unsupported config space location");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	pci_read_config_dword(dev, pos + reg->match_offset, &match_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	if ((match_reg & reg->match_mask_and) != reg->match_value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 	pci_read_config_dword(dev, pos + reg->reg_offset, &write_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 	orig_value = write_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 	write_reg &= reg->reg_mask_and;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 	write_reg |= reg->reg_mask_or;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	if (orig_value == write_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	pci_write_config_dword(dev, pos + reg->reg_offset, write_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	pci_dbg(dev, "Applied _HPX3 at [0x%x]: 0x%08x -> 0x%08x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 		pos, orig_value, write_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) static void program_hpx_type3(struct pci_dev *dev, struct hpx_type3 *hpx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 	if (!hpx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 	if (!pci_is_pcie(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	program_hpx_type3_register(dev, hpx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) static void parse_hpx3_register(struct hpx_type3 *hpx3_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 				union acpi_object *reg_fields)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	hpx3_reg->device_type            = reg_fields[0].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	hpx3_reg->function_type          = reg_fields[1].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 	hpx3_reg->config_space_location  = reg_fields[2].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	hpx3_reg->pci_exp_cap_id         = reg_fields[3].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	hpx3_reg->pci_exp_cap_ver        = reg_fields[4].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	hpx3_reg->pci_exp_vendor_id      = reg_fields[5].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	hpx3_reg->dvsec_id               = reg_fields[6].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	hpx3_reg->dvsec_rev              = reg_fields[7].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 	hpx3_reg->match_offset           = reg_fields[8].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 	hpx3_reg->match_mask_and         = reg_fields[9].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	hpx3_reg->match_value            = reg_fields[10].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	hpx3_reg->reg_offset             = reg_fields[11].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	hpx3_reg->reg_mask_and           = reg_fields[12].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	hpx3_reg->reg_mask_or            = reg_fields[13].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) static acpi_status program_type3_hpx_record(struct pci_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 					   union acpi_object *record)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	union acpi_object *fields = record->package.elements;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	u32 desc_count, expected_length, revision;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	union acpi_object *reg_fields;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	struct hpx_type3 hpx3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 	revision = fields[1].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	switch (revision) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 		desc_count = fields[2].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 		expected_length = 3 + desc_count * 14;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 		if (record->package.count != expected_length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 			return AE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 		for (i = 2; i < expected_length; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 			if (fields[i].type != ACPI_TYPE_INTEGER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 				return AE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 		for (i = 0; i < desc_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 			reg_fields = fields + 3 + i * 14;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 			parse_hpx3_register(&hpx3, reg_fields);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 			program_hpx_type3(dev, &hpx3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 		printk(KERN_WARNING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 			"%s: Type 3 Revision %d record not supported\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 			__func__, revision);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 		return AE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	return AE_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 	acpi_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	union acpi_object *package, *record, *fields;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	struct hpx_type0 hpx0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	struct hpx_type1 hpx1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	struct hpx_type2 hpx2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	u32 type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 	if (ACPI_FAILURE(status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 		return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	package = (union acpi_object *)buffer.pointer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 	if (package->type != ACPI_TYPE_PACKAGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 		status = AE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	for (i = 0; i < package->package.count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 		record = &package->package.elements[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 		if (record->type != ACPI_TYPE_PACKAGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 			status = AE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 			goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 		fields = record->package.elements;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 		if (fields[0].type != ACPI_TYPE_INTEGER ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 		    fields[1].type != ACPI_TYPE_INTEGER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 			status = AE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 			goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 		type = fields[0].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 		switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 		case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 			memset(&hpx0, 0, sizeof(hpx0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 			status = decode_type0_hpx_record(record, &hpx0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 			if (ACPI_FAILURE(status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 				goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 			program_hpx_type0(dev, &hpx0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 		case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 			memset(&hpx1, 0, sizeof(hpx1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 			status = decode_type1_hpx_record(record, &hpx1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 			if (ACPI_FAILURE(status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 				goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 			program_hpx_type1(dev, &hpx1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 		case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 			memset(&hpx2, 0, sizeof(hpx2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 			status = decode_type2_hpx_record(record, &hpx2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 			if (ACPI_FAILURE(status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 				goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 			program_hpx_type2(dev, &hpx2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 		case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 			status = program_type3_hpx_record(dev, record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 			if (ACPI_FAILURE(status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 				goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 			pr_err("%s: Type %d record not supported\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 			       __func__, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 			status = AE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 			goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694)  exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	kfree(buffer.pointer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) static acpi_status acpi_run_hpp(struct pci_dev *dev, acpi_handle handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	acpi_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	union acpi_object *package, *fields;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	struct hpx_type0 hpx0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	memset(&hpx0, 0, sizeof(hpx0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	if (ACPI_FAILURE(status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 		return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	package = (union acpi_object *) buffer.pointer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	if (package->type != ACPI_TYPE_PACKAGE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	    package->package.count != 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 		status = AE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	fields = package->package.elements;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	for (i = 0; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 		if (fields[i].type != ACPI_TYPE_INTEGER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 			status = AE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 			goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	hpx0.revision        = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	hpx0.cache_line_size = fields[0].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	hpx0.latency_timer   = fields[1].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	hpx0.enable_serr     = fields[2].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	hpx0.enable_perr     = fields[3].integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	program_hpx_type0(dev, &hpx0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	kfree(buffer.pointer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) /* pci_acpi_program_hp_params
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743)  * @dev - the pci_dev for which we want parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) int pci_acpi_program_hp_params(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	acpi_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	acpi_handle handle, phandle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	struct pci_bus *pbus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	if (acpi_pci_disabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	handle = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	for (pbus = dev->bus; pbus; pbus = pbus->parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 		handle = acpi_pci_get_bridge_handle(pbus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 		if (handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	 * _HPP settings apply to all child buses, until another _HPP is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	 * encountered. If we don't find an _HPP for the input pci dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	 * look for it in the parent device scope since that would apply to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	 * this pci dev.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	while (handle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 		status = acpi_run_hpx(dev, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		if (ACPI_SUCCESS(status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 		status = acpi_run_hpp(dev, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		if (ACPI_SUCCESS(status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		if (acpi_is_root_bridge(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 		status = acpi_get_parent(handle, &phandle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		if (ACPI_FAILURE(status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		handle = phandle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785)  * pciehp_is_native - Check whether a hotplug port is handled by the OS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786)  * @bridge: Hotplug port to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788)  * Returns true if the given @bridge is handled by the native PCIe hotplug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789)  * driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) bool pciehp_is_native(struct pci_dev *bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	const struct pci_host_bridge *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	u32 slot_cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	pcie_capability_read_dword(bridge, PCI_EXP_SLTCAP, &slot_cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	if (!(slot_cap & PCI_EXP_SLTCAP_HPC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	if (pcie_ports_native)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	host = pci_find_host_bridge(bridge->bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	return host->native_pcie_hotplug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811)  * shpchp_is_native - Check whether a hotplug port is handled by the OS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812)  * @bridge: Hotplug port to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814)  * Returns true if the given @bridge is handled by the native SHPC hotplug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815)  * driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) bool shpchp_is_native(struct pci_dev *bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	return bridge->shpc_managed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823)  * pci_acpi_wake_bus - Root bus wakeup notification fork function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824)  * @context: Device wakeup context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) static void pci_acpi_wake_bus(struct acpi_device_wakeup_context *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	struct acpi_device *adev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	struct acpi_pci_root *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	adev = container_of(context, struct acpi_device, wakeup.context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	root = acpi_driver_data(adev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	pci_pme_wakeup_bus(root->bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837)  * pci_acpi_wake_dev - PCI device wakeup notification work function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838)  * @context: Device wakeup context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) static void pci_acpi_wake_dev(struct acpi_device_wakeup_context *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	struct pci_dev *pci_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	pci_dev = to_pci_dev(context->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	if (pci_dev->pme_poll)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		pci_dev->pme_poll = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	if (pci_dev->current_state == PCI_D3cold) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		pci_wakeup_event(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		pm_request_resume(&pci_dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	/* Clear PME Status if set. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	if (pci_dev->pme_support)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		pci_check_pme_status(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	pci_wakeup_event(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	pm_request_resume(&pci_dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	pci_pme_wakeup_bus(pci_dev->subordinate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866)  * pci_acpi_add_bus_pm_notifier - Register PM notifier for root PCI bus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867)  * @dev: PCI root bridge ACPI device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	return acpi_add_pm_notifier(dev, NULL, pci_acpi_wake_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875)  * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876)  * @dev: ACPI device to add the notifier for.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877)  * @pci_dev: PCI device to check for the PME status if an event is signaled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 				     struct pci_dev *pci_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886)  * _SxD returns the D-state with the highest power
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887)  * (lowest D-state number) supported in the S-state "x".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889)  * If the devices does not have a _PRW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890)  * (Power Resources for Wake) supporting system wakeup from "x"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891)  * then the OS is free to choose a lower power (higher number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892)  * D-state) than the return value from _SxD.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894)  * But if _PRW is enabled at S-state "x", the OS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895)  * must not choose a power lower than _SxD --
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896)  * unless the device has an _SxW method specifying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897)  * the lowest power (highest D-state number) the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898)  * may enter while still able to wake the system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900)  * ie. depending on global OS policy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902)  * if (_PRW at S-state x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903)  *	choose from highest power _SxD to lowest power _SxW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904)  * else // no _PRW at S-state x
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905)  *	choose highest power _SxD or any lower power
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	int acpi_state, d_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	if (pdev->no_d3cold)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		d_max = ACPI_STATE_D3_HOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		d_max = ACPI_STATE_D3_COLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL, d_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	if (acpi_state < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		return PCI_POWER_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	switch (acpi_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	case ACPI_STATE_D0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		return PCI_D0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	case ACPI_STATE_D1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		return PCI_D1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	case ACPI_STATE_D2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		return PCI_D2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	case ACPI_STATE_D3_HOT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		return PCI_D3hot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	case ACPI_STATE_D3_COLD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		return PCI_D3cold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	return PCI_POWER_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) static struct acpi_device *acpi_pci_find_companion(struct device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) static bool acpi_pci_bridge_d3(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	const struct fwnode_handle *fwnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	struct acpi_device *adev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	struct pci_dev *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	u8 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	if (!dev->is_hotplug_bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	/* Assume D3 support if the bridge is power-manageable by ACPI. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	adev = ACPI_COMPANION(&dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	if (!adev && !pci_dev_is_added(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		adev = acpi_pci_find_companion(&dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		ACPI_COMPANION_SET(&dev->dev, adev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	if (adev && acpi_device_power_manageable(adev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	 * Look for a special _DSD property for the root port and if it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	 * is set we know the hierarchy behind it supports D3 just fine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	root = pcie_find_root_port(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	if (!root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	adev = ACPI_COMPANION(&root->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	if (root == dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		 * It is possible that the ACPI companion is not yet bound
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		 * for the root port so look it up manually here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		if (!adev && !pci_dev_is_added(root))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 			adev = acpi_pci_find_companion(&root->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	if (!adev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	fwnode = acpi_fwnode_handle(adev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	if (fwnode_property_read_u8(fwnode, "HotPlugSupportInD3", &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	return val == 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) static bool acpi_pci_power_manageable(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	return adev ? acpi_device_power_manageable(adev) : false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	static const u8 state_conv[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		[PCI_D0] = ACPI_STATE_D0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		[PCI_D1] = ACPI_STATE_D1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		[PCI_D2] = ACPI_STATE_D2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		[PCI_D3hot] = ACPI_STATE_D3_HOT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		[PCI_D3cold] = ACPI_STATE_D3_COLD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	int error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	/* If the ACPI device has _EJ0, ignore the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	if (!adev || acpi_has_method(adev->handle, "_EJ0"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	case PCI_D3cold:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 				PM_QOS_FLAGS_ALL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 			error = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	case PCI_D0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	case PCI_D1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	case PCI_D2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	case PCI_D3hot:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		error = acpi_device_set_power(adev, state_conv[state]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		pci_dbg(dev, "power state changed by ACPI to %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 			 acpi_power_state_string(state_conv[state]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) static pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	static const pci_power_t state_conv[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		[ACPI_STATE_D0]      = PCI_D0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		[ACPI_STATE_D1]      = PCI_D1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		[ACPI_STATE_D2]      = PCI_D2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		[ACPI_STATE_D3_HOT]  = PCI_D3hot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		[ACPI_STATE_D3_COLD] = PCI_D3cold,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	int state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	if (!adev || !acpi_device_power_manageable(adev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		return PCI_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	state = adev->power.state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	if (state == ACPI_STATE_UNKNOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		return PCI_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	return state_conv[state];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) static void acpi_pci_refresh_power_state(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	if (adev && acpi_device_power_manageable(adev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 		acpi_device_update_power(adev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	while (bus->parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		if (acpi_pm_device_can_wakeup(&bus->self->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 			return acpi_pm_set_device_wakeup(&bus->self->dev, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		bus = bus->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	/* We have reached the root bus. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	if (bus->bridge) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		if (acpi_pm_device_can_wakeup(bus->bridge))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 			return acpi_pm_set_device_wakeup(bus->bridge, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) static int acpi_pci_wakeup(struct pci_dev *dev, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	if (acpi_pm_device_can_wakeup(&dev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		return acpi_pm_set_device_wakeup(&dev->dev, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	return acpi_pci_propagate_wakeup(dev->bus, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) static bool acpi_pci_need_resume(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	 * In some cases (eg. Samsung 305V4A) leaving a bridge in suspend over
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	 * system-wide suspend/resume confuses the platform firmware, so avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	 * doing that.  According to Section 16.1.6 of ACPI 6.2, endpoint
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	 * devices are expected to be in D3 before invoking the S3 entry path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	 * from the firmware, so they should not be affected by this issue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	if (pci_is_bridge(dev) && acpi_target_system_state() != ACPI_STATE_S0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	if (!adev || !acpi_device_power_manageable(adev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	if (adev->wakeup.flags.valid &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	    device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	if (acpi_target_system_state() == ACPI_STATE_S0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	return !!adev->power.flags.dsw_present;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) static const struct pci_platform_pm_ops acpi_pci_platform_pm = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	.bridge_d3 = acpi_pci_bridge_d3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	.is_manageable = acpi_pci_power_manageable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	.set_state = acpi_pci_set_power_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	.get_state = acpi_pci_get_power_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	.refresh_state = acpi_pci_refresh_power_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	.choose_state = acpi_pci_choose_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	.set_wakeup = acpi_pci_wakeup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	.need_resume = acpi_pci_need_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) void acpi_pci_add_bus(struct pci_bus *bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	union acpi_object *obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	struct pci_host_bridge *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	acpi_pci_slot_enumerate(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	acpiphp_enumerate_slots(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	 * For a host bridge, check its _DSM for function 8 and if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	 * that is available, mark it in pci_host_bridge.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	if (!pci_is_root_bus(bus))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 				DSM_PCI_POWER_ON_RESET_DELAY, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	if (!obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	if (obj->type == ACPI_TYPE_INTEGER && obj->integer.value == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		bridge = pci_find_host_bridge(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		bridge->ignore_reset_delay = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	ACPI_FREE(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) void acpi_pci_remove_bus(struct pci_bus *bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	if (acpi_pci_disabled || !bus->bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	acpiphp_remove_slots(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	acpi_pci_slot_remove(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) /* ACPI bus type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) static struct acpi_device *acpi_pci_find_companion(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	struct pci_dev *pci_dev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	bool check_children;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	u64 addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	check_children = pci_is_bridge(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	/* Please ref to ACPI spec for the syntax of _ADR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	return acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 				      check_children);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)  * pci_acpi_optimize_delay - optimize PCI D3 and D3cold delay from ACPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)  * @pdev: the PCI device whose delay is to be updated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)  * @handle: ACPI handle of this device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)  * Update the d3hot_delay and d3cold_delay of a PCI device from the ACPI _DSM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)  * control method of either the device itself or the PCI host bridge.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)  * Function 8, "Reset Delay," applies to the entire hierarchy below a PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)  * host bridge.  If it returns one, the OS may assume that all devices in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)  * the hierarchy have already completed power-on reset delays.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)  * Function 9, "Device Readiness Durations," applies only to the object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)  * where it is located.  It returns delay durations required after various
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)  * events if the device requires less time than the spec requires.  Delays
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)  * from this function take precedence over the Reset Delay function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)  * These _DSM functions are defined by the draft ECN of January 28, 2014,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)  * titled "ACPI additions for FW latency optimizations."
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) static void pci_acpi_optimize_delay(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 				    acpi_handle handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	int value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	union acpi_object *obj, *elements;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	if (bridge->ignore_reset_delay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		pdev->d3cold_delay = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 				DSM_PCI_DEVICE_READINESS_DURATIONS, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	if (!obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 		elements = obj->package.elements;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		if (elements[0].type == ACPI_TYPE_INTEGER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 			value = (int)elements[0].integer.value / 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 			if (value < PCI_PM_D3COLD_WAIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 				pdev->d3cold_delay = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 		if (elements[3].type == ACPI_TYPE_INTEGER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 			value = (int)elements[3].integer.value / 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 			if (value < PCI_PM_D3HOT_WAIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 				pdev->d3hot_delay = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	ACPI_FREE(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) static void pci_acpi_set_external_facing(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	u8 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	if (device_property_read_u8(&dev->dev, "ExternalFacingPort", &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	 * These root ports expose PCIe (including DMA) outside of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	 * system.  Everything downstream from them is external.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	if (val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		dev->external_facing = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) static void pci_acpi_setup(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	struct pci_dev *pci_dev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	struct acpi_device *adev = ACPI_COMPANION(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	if (!adev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	pci_acpi_optimize_delay(pci_dev, adev->handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	pci_acpi_set_external_facing(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	pci_acpi_add_edr_notifier(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	pci_acpi_add_pm_notifier(adev, pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	if (!adev->wakeup.flags.valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	device_set_wakeup_capable(dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	 * For bridges that can do D3 we enable wake automatically (as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	 * we do for the power management itself in that case). The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	 * reason is that the bridge may have additional methods such as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	 * _DSW that need to be called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	if (pci_dev->bridge_d3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 		device_wakeup_enable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	acpi_pci_wakeup(pci_dev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	acpi_device_power_add_dependent(adev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) static void pci_acpi_cleanup(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	struct acpi_device *adev = ACPI_COMPANION(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	struct pci_dev *pci_dev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	if (!adev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	pci_acpi_remove_edr_notifier(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	pci_acpi_remove_pm_notifier(adev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	if (adev->wakeup.flags.valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 		acpi_device_power_remove_dependent(adev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 		if (pci_dev->bridge_d3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 			device_wakeup_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 		device_set_wakeup_capable(dev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) static bool pci_acpi_bus_match(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	return dev_is_pci(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) static struct acpi_bus_type acpi_pci_bus = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	.name = "PCI",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	.match = pci_acpi_bus_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	.find_companion = acpi_pci_find_companion,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	.setup = pci_acpi_setup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	.cleanup = pci_acpi_cleanup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) static struct fwnode_handle *(*pci_msi_get_fwnode_cb)(struct device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)  * pci_msi_register_fwnode_provider - Register callback to retrieve fwnode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)  * @fn:       Callback matching a device to a fwnode that identifies a PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)  *            MSI domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)  * This should be called by irqchip driver, which is the parent of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)  * the MSI domain to provide callback interface to query fwnode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	pci_msi_get_fwnode_cb = fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)  * pci_host_bridge_acpi_msi_domain - Retrieve MSI domain of a PCI host bridge
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)  * @bus:      The PCI host bridge bus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)  * This function uses the callback function registered by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)  * pci_msi_register_fwnode_provider() to retrieve the irq_domain with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)  * type DOMAIN_BUS_PCI_MSI of the specified host bridge bus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)  * This returns NULL on error or when the domain is not found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	struct fwnode_handle *fwnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	if (!pci_msi_get_fwnode_cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	fwnode = pci_msi_get_fwnode_cb(&bus->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	if (!fwnode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	return irq_find_matching_fwnode(fwnode, DOMAIN_BUS_PCI_MSI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) static int __init acpi_pci_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		pr_info("ACPI FADT declares the system doesn't support MSI, so disable it\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 		pci_no_msi();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 		pr_info("ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		pcie_no_aspm();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	ret = register_acpi_bus_type(&acpi_pci_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	pci_set_platform_pm(&acpi_pci_platform_pm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	acpi_pci_slot_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	acpiphp_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) arch_initcall(acpi_pci_init);