Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 sources for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * (C) Copyright 2002-2004, 2007 Greg Kroah-Hartman <greg@kroah.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * (C) Copyright 2007 Novell Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/mempolicy.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/sched/isolation.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/pm_runtime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/suspend.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/kexec.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/acpi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/dma-map-ops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include "pci.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include "pcie/portdrv.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) 
/*
 * A device ID added to a driver at runtime (e.g. via the sysfs "new_id"
 * file).  Kept on pci_driver.dynids.list and consulted by
 * pci_match_device() before the driver's static id_table.
 */
struct pci_dynid {
	struct list_head node;		/* entry in pci_driver.dynids.list */
	struct pci_device_id id;	/* the dynamically added ID */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32)  * pci_add_dynid - add a new PCI device ID to this driver and re-probe devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33)  * @drv: target pci driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34)  * @vendor: PCI vendor ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35)  * @device: PCI device ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36)  * @subvendor: PCI subvendor ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37)  * @subdevice: PCI subdevice ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38)  * @class: PCI class
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39)  * @class_mask: PCI class mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40)  * @driver_data: private driver data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42)  * Adds a new dynamic pci device ID to this driver and causes the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43)  * driver to probe for all devices again.  @drv must have been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44)  * registered prior to calling this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46)  * CONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47)  * Does GFP_KERNEL allocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49)  * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50)  * 0 on success, -errno on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) int pci_add_dynid(struct pci_driver *drv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) 		  unsigned int vendor, unsigned int device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) 		  unsigned int subvendor, unsigned int subdevice,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) 		  unsigned int class, unsigned int class_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) 		  unsigned long driver_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 	struct pci_dynid *dynid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) 	if (!dynid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 	dynid->id.vendor = vendor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 	dynid->id.device = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) 	dynid->id.subvendor = subvendor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) 	dynid->id.subdevice = subdevice;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 	dynid->id.class = class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 	dynid->id.class_mask = class_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 	dynid->id.driver_data = driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 	spin_lock(&drv->dynids.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 	list_add_tail(&dynid->node, &drv->dynids.list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 	spin_unlock(&drv->dynids.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) 	return driver_attach(&drv->driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) EXPORT_SYMBOL_GPL(pci_add_dynid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) static void pci_free_dynids(struct pci_driver *drv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 	struct pci_dynid *dynid, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 	spin_lock(&drv->dynids.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 		list_del(&dynid->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 		kfree(dynid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 	spin_unlock(&drv->dynids.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 
/**
 * new_id_store - sysfs frontend to pci_add_dynid()
 * @driver: target device driver
 * @buf: buffer for scanning device ID data
 * @count: input size
 *
 * Allow PCI IDs to be added to an existing driver via sysfs.
 *
 * Input format (all fields hex):
 *   "vendor device [subvendor [subdevice [class [class_mask [driver_data]]]]]"
 * At least vendor and device must be supplied.
 */
static ssize_t new_id_store(struct device_driver *driver, const char *buf,
			    size_t count)
{
	struct pci_driver *pdrv = to_pci_driver(driver);
	const struct pci_device_id *ids = pdrv->id_table;
	u32 vendor, device, subvendor = PCI_ANY_ID,
		subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
	unsigned long driver_data = 0;
	int fields = 0;
	int retval = 0;

	fields = sscanf(buf, "%x %x %x %x %x %x %lx",
			&vendor, &device, &subvendor, &subdevice,
			&class, &class_mask, &driver_data);
	if (fields < 2)
		return -EINVAL;

	if (fields != 7) {
		/*
		 * No explicit driver_data was supplied: refuse IDs that
		 * duplicate an entry already in the static id_table.  A
		 * throwaway pci_dev is built purely to reuse pci_match_id().
		 */
		struct pci_dev *pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
		if (!pdev)
			return -ENOMEM;

		pdev->vendor = vendor;
		pdev->device = device;
		pdev->subsystem_vendor = subvendor;
		pdev->subsystem_device = subdevice;
		pdev->class = class;

		if (pci_match_id(pdrv->id_table, pdev))
			retval = -EEXIST;

		kfree(pdev);

		if (retval)
			return retval;
	}

	/* Only accept driver_data values that match an existing id_table
	   entry */
	if (ids) {
		retval = -EINVAL;
		/* The id_table is terminated by an all-zero sentinel. */
		while (ids->vendor || ids->subvendor || ids->class_mask) {
			if (driver_data == ids->driver_data) {
				retval = 0;
				break;
			}
			ids++;
		}
		if (retval)	/* No match */
			return retval;
	}

	retval = pci_add_dynid(pdrv, vendor, device, subvendor, subdevice,
			       class, class_mask, driver_data);
	if (retval)
		return retval;
	return count;
}
static DRIVER_ATTR_WO(new_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161)  * store_remove_id - remove a PCI device ID from this driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162)  * @driver: target device driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163)  * @buf: buffer for scanning device ID data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164)  * @count: input size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166)  * Removes a dynamic pci device ID to this driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 			       size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 	struct pci_dynid *dynid, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 	struct pci_driver *pdrv = to_pci_driver(driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 	u32 vendor, device, subvendor = PCI_ANY_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 		subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 	int fields = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 	size_t retval = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 	fields = sscanf(buf, "%x %x %x %x %x %x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 			&vendor, &device, &subvendor, &subdevice,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 			&class, &class_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 	if (fields < 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 	spin_lock(&pdrv->dynids.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 	list_for_each_entry_safe(dynid, n, &pdrv->dynids.list, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 		struct pci_device_id *id = &dynid->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 		if ((id->vendor == vendor) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 		    (id->device == device) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 		    (subvendor == PCI_ANY_ID || id->subvendor == subvendor) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 		    (subdevice == PCI_ANY_ID || id->subdevice == subdevice) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 		    !((id->class ^ class) & class_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 			list_del(&dynid->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 			kfree(dynid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 			retval = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 	spin_unlock(&pdrv->dynids.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) static DRIVER_ATTR_WO(remove_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 
/* sysfs attributes (new_id, remove_id) exposed for every PCI driver */
static struct attribute *pci_drv_attrs[] = {
	&driver_attr_new_id.attr,
	&driver_attr_remove_id.attr,
	NULL,	/* sentinel */
};
ATTRIBUTE_GROUPS(pci_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212)  * pci_match_id - See if a pci device matches a given pci_id table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213)  * @ids: array of PCI device id structures to search in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214)  * @dev: the PCI device structure to match against.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216)  * Used by a driver to check whether a PCI device present in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217)  * system is in its list of supported devices.  Returns the matching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218)  * pci_device_id structure or %NULL if there is no match.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220)  * Deprecated, don't use this as it will not catch any dynamic ids
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221)  * that a driver might want to check for.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 					 struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 	if (ids) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 		while (ids->vendor || ids->subvendor || ids->class_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 			if (pci_match_one_device(ids, dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 				return ids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 			ids++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) EXPORT_SYMBOL(pci_match_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 
/*
 * Wildcard ID handed back by pci_match_device() when driver_override
 * forces a bind but nothing in the driver's tables actually matched.
 */
static const struct pci_device_id pci_device_id_any = {
	.vendor = PCI_ANY_ID,
	.device = PCI_ANY_ID,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245)  * pci_match_device - Tell if a PCI device structure has a matching PCI device id structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246)  * @drv: the PCI driver to match against
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247)  * @dev: the PCI device structure to match against
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249)  * Used by a driver to check whether a PCI device present in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250)  * system is in its list of supported devices.  Returns the matching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251)  * pci_device_id structure or %NULL if there is no match.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 						    struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 	struct pci_dynid *dynid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 	const struct pci_device_id *found_id = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 	/* When driver_override is set, only bind to the matching driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 	if (dev->driver_override && strcmp(dev->driver_override, drv->name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 	/* Look at the dynamic ids first, before the static ones */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 	spin_lock(&drv->dynids.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 	list_for_each_entry(dynid, &drv->dynids.list, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 		if (pci_match_one_device(&dynid->id, dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 			found_id = &dynid->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) 	spin_unlock(&drv->dynids.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 	if (!found_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 		found_id = pci_match_id(drv->id_table, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 	/* driver_override will always match, send a dummy id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 	if (!found_id && dev->driver_override)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 		found_id = &pci_device_id_any;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 	return found_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 
/* Argument bundle passed through work_on_cpu() to local_pci_probe(). */
struct drv_dev_and_id {
	struct pci_driver *drv;		/* driver whose probe() is invoked */
	struct pci_dev *dev;		/* device being probed */
	const struct pci_device_id *id;	/* ID that matched the device */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) static long local_pci_probe(void *_ddi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 	struct drv_dev_and_id *ddi = _ddi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 	struct pci_dev *pci_dev = ddi->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 	struct pci_driver *pci_drv = ddi->drv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 	struct device *dev = &pci_dev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 	 * Unbound PCI devices are always put in D0, regardless of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 	 * runtime PM status.  During probe, the device is set to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 	 * active and the usage count is incremented.  If the driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 	 * supports runtime PM, it should call pm_runtime_put_noidle(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 	 * or any other runtime PM helper function decrementing the usage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 	 * count, in its probe routine and pm_runtime_get_noresume() in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) 	 * its remove routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 	pm_runtime_get_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 	pci_dev->driver = pci_drv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) 	rc = pci_drv->probe(pci_dev, ddi->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 	if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) 	if (rc < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) 		pci_dev->driver = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) 		pm_runtime_put_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 	 * Probe function should return < 0 for failure, 0 for success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 	 * Treat values > 0 as success, but warn.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 	pci_warn(pci_dev, "Driver probe function unexpectedly returned %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 		 rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 
/*
 * True if @dev is an SR-IOV virtual function whose physical function is
 * currently inside its own probe path.  Always false without
 * CONFIG_PCI_IOV, where dev->physfn does not exist.
 */
static bool pci_physfn_is_probed(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_IOV
	return dev->is_virtfn && dev->physfn->is_probed;
#else
	return false;
#endif
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 
/*
 * Run @drv's probe for @dev, preferring a housekeeping CPU on the
 * device's NUMA node so that node-local allocations made during probe
 * land on the right node.
 */
static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
			  const struct pci_device_id *id)
{
	int error, node, cpu;
	int hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
	struct drv_dev_and_id ddi = { drv, dev, id };

	/*
	 * Execute driver initialization on node where the device is
	 * attached.  This way the driver likely allocates its local memory
	 * on the right node.
	 */
	node = dev_to_node(&dev->dev);
	/* Flag checked by pci_physfn_is_probed() when a VF probes below us. */
	dev->is_probed = 1;

	/* Keep the CPU we may pick from being unplugged while probe runs. */
	cpu_hotplug_disable();

	/*
	 * Prevent nesting work_on_cpu() for the case where a Virtual Function
	 * device is probed from work_on_cpu() of the Physical device.
	 */
	if (node < 0 || node >= MAX_NUMNODES || !node_online(node) ||
	    pci_physfn_is_probed(dev))
		cpu = nr_cpu_ids;	/* no valid target: probe locally */
	else
		cpu = cpumask_any_and(cpumask_of_node(node),
				      housekeeping_cpumask(hk_flags));

	if (cpu < nr_cpu_ids)
		error = work_on_cpu(cpu, local_pci_probe, &ddi);
	else
		error = local_pci_probe(&ddi);

	dev->is_probed = 0;
	cpu_hotplug_enable();
	return error;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373)  * __pci_device_probe - check if a driver wants to claim a specific PCI device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374)  * @drv: driver to call to check if it wants the PCI device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375)  * @pci_dev: PCI device being probed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377)  * returns 0 on success, else error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378)  * side-effect: pci_dev->driver is set to drv when drv claims pci_dev.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) static int __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 	const struct pci_device_id *id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 	int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 	if (!pci_dev->driver && drv->probe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 		error = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 		id = pci_match_device(drv, pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 		if (id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 			error = pci_call_probe(drv, pci_dev, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 
/* Arch hook: set up the device's IRQ before probe; default is a no-op. */
int __weak pcibios_alloc_irq(struct pci_dev *dev)
{
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 
/* Arch hook: release whatever pcibios_alloc_irq() set up; default no-op. */
void __weak pcibios_free_irq(struct pci_dev *dev)
{
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 
#ifdef CONFIG_PCI_IOV
/*
 * SR-IOV virtual functions are only probed automatically when their
 * physical function allows it (drivers_autoprobe) or when userspace has
 * forced a specific driver via driver_override.
 */
static inline bool pci_device_can_probe(struct pci_dev *pdev)
{
	return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe ||
		pdev->driver_override);
}
#else
/* Without SR-IOV there are no virtual functions to restrict. */
static inline bool pci_device_can_probe(struct pci_dev *pdev)
{
	return true;
}
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 
/*
 * Bus-level probe callback: match @dev against its candidate PCI driver
 * and, on a match, run the driver's probe routine.
 */
static int pci_device_probe(struct device *dev)
{
	int error;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = to_pci_driver(dev->driver);

	if (!pci_device_can_probe(pci_dev))
		return -ENODEV;

	pci_assign_irq(pci_dev);

	error = pcibios_alloc_irq(pci_dev);
	if (error < 0)
		return error;

	/* Hold a reference for the lifetime of the binding. */
	pci_dev_get(pci_dev);
	error = __pci_device_probe(drv, pci_dev);
	if (error) {
		/* Probe failed or no match: undo the IRQ setup and the ref. */
		pcibios_free_irq(pci_dev);
		pci_dev_put(pci_dev);
	}

	return error;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) static int pci_device_remove(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 	struct pci_dev *pci_dev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	struct pci_driver *drv = pci_dev->driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 	if (drv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 		if (drv->remove) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 			pm_runtime_get_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 			drv->remove(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 			pm_runtime_put_noidle(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 		pcibios_free_irq(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 		pci_dev->driver = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 		pci_iov_remove(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 	/* Undo the runtime PM settings in local_pci_probe() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	pm_runtime_put_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 	 * If the device is still on, set the power state as "unknown",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	 * since it might change by the next time we load the driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	if (pci_dev->current_state == PCI_D0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 		pci_dev->current_state = PCI_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	 * We would love to complain here if pci_dev->is_enabled is set, that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	 * the driver should have called pci_disable_device(), but the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	 * unfortunate fact is there are too many odd BIOS and bridge setups
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	 * that don't like drivers doing that all of the time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	 * Oh well, we can dream of sane hardware when we sleep, no matter how
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	 * horrible the crap we have to deal with is when we are awake...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	pci_dev_put(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) static void pci_device_shutdown(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	struct pci_dev *pci_dev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	struct pci_driver *drv = pci_dev->driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	pm_runtime_resume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	if (drv && drv->shutdown)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 		drv->shutdown(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 	 * If this is a kexec reboot, turn off Bus Master bit on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 	 * device to tell it to not continue to do DMA. Don't touch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 	 * devices in D3cold or unknown states.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	 * If it is not a kexec reboot, firmware will hit the PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	 * devices with big hammer and stop their DMA any way.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 		pci_clear_master(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) /* Auxiliary functions used for system resume and run-time resume. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507)  * pci_restore_standard_config - restore standard config registers of PCI device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508)  * @pci_dev: PCI device to handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) static int pci_restore_standard_config(struct pci_dev *pci_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	pci_update_current_state(pci_dev, PCI_UNKNOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	if (pci_dev->current_state != PCI_D0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 		int error = pci_set_power_state(pci_dev, PCI_D0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 			return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 	pci_restore_state(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	pci_pme_restore(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) static void pci_pm_default_resume(struct pci_dev *pci_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	pci_fixup_device(pci_fixup_resume, pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 	pci_enable_wake(pci_dev, PCI_D0, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	pci_power_up(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	pci_update_current_state(pci_dev, PCI_D0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	pci_restore_state(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	pci_pme_restore(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544)  * Default "suspend" method for devices that have no driver provided suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545)  * or not even a driver at all (second part).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) static void pci_pm_set_unknown_state(struct pci_dev *pci_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	 * mark its power state as "unknown", since we don't know if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	 * e.g. the BIOS will change its device state when we suspend.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 	if (pci_dev->current_state == PCI_D0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 		pci_dev->current_state = PCI_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558)  * Default "resume" method for devices that have no driver provided resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559)  * or not even a driver at all (second part).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) static int pci_pm_reenable_device(struct pci_dev *pci_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	/* if the device was enabled before suspend, reenable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	retval = pci_reenable_device(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	 * if the device was busmaster before the suspend, make it busmaster
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 	 * again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	if (pci_dev->is_busmaster)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 		pci_set_master(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) static int pci_legacy_suspend(struct device *dev, pm_message_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 	struct pci_dev *pci_dev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	struct pci_driver *drv = pci_dev->driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	if (drv && drv->suspend) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 		pci_power_t prev = pci_dev->current_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 		int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 		error = drv->suspend(pci_dev, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 		suspend_report_result(drv->suspend, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 			return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 		    && pci_dev->current_state != PCI_UNKNOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 			pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 				      "PCI PM: Device state not saved by %pS\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 				      drv->suspend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	pci_fixup_device(pci_fixup_suspend, pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	struct pci_dev *pci_dev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	if (!pci_dev->state_saved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 		pci_save_state(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	pci_pm_set_unknown_state(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	pci_fixup_device(pci_fixup_suspend_late, pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) static int pci_legacy_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	struct pci_dev *pci_dev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	struct pci_driver *drv = pci_dev->driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	pci_fixup_device(pci_fixup_resume, pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	return drv && drv->resume ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 			drv->resume(pci_dev) : pci_pm_reenable_device(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) /* Auxiliary functions used by the new power management framework */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) static void pci_pm_default_suspend(struct pci_dev *pci_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	/* Disable non-bridge devices without PM support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	if (!pci_has_subordinate(pci_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 		pci_disable_enabled_device(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 	struct pci_driver *drv = pci_dev->driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	bool ret = drv && (drv->suspend || drv->resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	 * Legacy PM support is used by default, so warn if the new framework is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	 * supported as well.  Drivers are supposed to support either the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 	 * former, or the latter, but not both at the same time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	pci_WARN(pci_dev, ret && drv->driver.pm, "device %04x:%04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 		 pci_dev->vendor, pci_dev->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) /* New power management framework */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) static int pci_pm_prepare(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	struct pci_dev *pci_dev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	if (pm && pm->prepare) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 		int error = pm->prepare(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 		if (error < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 			return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 		if (!error && dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_PREPARE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	if (pci_dev_need_resume(pci_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	 * The PME setting needs to be adjusted here in case the direct-complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	 * optimization is used with respect to this device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 	pci_dev_adjust_pme(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) static void pci_pm_complete(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	struct pci_dev *pci_dev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	pci_dev_complete_resume(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	pm_generic_complete(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	/* Resume device if platform firmware has put it in reset-power-on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	if (pm_runtime_suspended(dev) && pm_resume_via_firmware()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 		pci_power_t pre_sleep_state = pci_dev->current_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 		pci_refresh_power_state(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 		 * On platforms with ACPI this check may also trigger for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 		 * devices sharing power resources if one of those power
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 		 * resources has been activated as a result of a change of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 		 * power state of another device sharing it.  However, in that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 		 * case it is also better to resume the device, in general.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 		if (pci_dev->current_state < pre_sleep_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 			pm_request_resume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) #else /* !CONFIG_PM_SLEEP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) #define pci_pm_prepare	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) #define pci_pm_complete	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) #endif /* !CONFIG_PM_SLEEP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) #ifdef CONFIG_SUSPEND
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) static void pcie_pme_root_status_cleanup(struct pci_dev *pci_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	 * Some BIOSes forget to clear Root PME Status bits after system
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	 * wakeup, which breaks ACPI-based runtime wakeup on PCI Express.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	 * Clear those bits now just in case (shouldn't hurt).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	if (pci_is_pcie(pci_dev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	    (pci_pcie_type(pci_dev) == PCI_EXP_TYPE_ROOT_PORT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	     pci_pcie_type(pci_dev) == PCI_EXP_TYPE_RC_EC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 		pcie_clear_root_pme_status(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) static int pci_pm_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	struct pci_dev *pci_dev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	pci_dev->skip_bus_pm = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	if (pci_has_legacy_pm_support(pci_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 		return pci_legacy_suspend(dev, PMSG_SUSPEND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	if (!pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 		pci_pm_default_suspend(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	 * PCI devices suspended at run time may need to be resumed at this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	 * point, because in general it may be necessary to reconfigure them for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	 * system suspend.  Namely, if the device is expected to wake up the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	 * system from the sleep state, it may have to be reconfigured for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	 * purpose, or if the device is not expected to wake up the system from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	 * the sleep state, it should be prevented from signaling wakeup events
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	 * going forward.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	 * Also if the driver of the device does not indicate that its system
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	 * suspend callbacks can cope with runtime-suspended devices, it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	 * better to resume the device from runtime suspend here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	    pci_dev_need_resume(pci_dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 		pm_runtime_resume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 		pci_dev->state_saved = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 		pci_dev_adjust_pme(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	if (pm->suspend) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 		pci_power_t prev = pci_dev->current_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 		int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 		error = pm->suspend(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 		suspend_report_result(pm->suspend, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 			return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 		    && pci_dev->current_state != PCI_UNKNOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 			pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 				      "PCI PM: State of device not saved by %pS\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 				      pm->suspend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) static int pci_pm_suspend_late(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	if (dev_pm_skip_suspend(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	return pm_generic_suspend_late(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) static int pci_pm_suspend_noirq(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	struct pci_dev *pci_dev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	if (dev_pm_skip_suspend(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	if (pci_has_legacy_pm_support(pci_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		return pci_legacy_suspend_late(dev, PMSG_SUSPEND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	if (!pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		pci_save_state(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		goto Fixup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	if (pm->suspend_noirq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		pci_power_t prev = pci_dev->current_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 		error = pm->suspend_noirq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		suspend_report_result(pm->suspend_noirq, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 			return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		    && pci_dev->current_state != PCI_UNKNOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 			pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 				      "PCI PM: State of device not saved by %pS\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 				      pm->suspend_noirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 			goto Fixup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	if (pci_dev->skip_bus_pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		 * Either the device is a bridge with a child in D0 below it, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		 * the function is running for the second time in a row without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		 * going through full resume, which is possible only during
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		 * suspend-to-idle in a spurious wakeup case.  The device should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		 * be in D0 at this point, but if it is a bridge, it may be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		 * necessary to save its state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		if (!pci_dev->state_saved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 			pci_save_state(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	} else if (!pci_dev->state_saved) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		pci_save_state(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		if (pci_power_manageable(pci_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 			pci_prepare_to_sleep(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	pci_dbg(pci_dev, "PCI PM: Suspend power state: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		pci_power_name(pci_dev->current_state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	if (pci_dev->current_state == PCI_D0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		pci_dev->skip_bus_pm = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		 * Per PCI PM r1.2, table 6-1, a bridge must be in D0 if any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		 * downstream device is in D0, so avoid changing the power state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		 * of the parent bridge by setting the skip_bus_pm flag for it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		if (pci_dev->bus->self)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 			pci_dev->bus->self->skip_bus_pm = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	if (pci_dev->skip_bus_pm && pm_suspend_no_platform()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		pci_dbg(pci_dev, "PCI PM: Skipped\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		goto Fixup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	pci_pm_set_unknown_state(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	 * Some BIOSes from ASUS have a bug: If a USB EHCI host controller's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	 * PCI COMMAND register isn't 0, the BIOS assumes that the controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	 * hasn't been quiesced and tries to turn it off.  If the controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	 * is already in D3, this can hang or cause memory corruption.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	 * Since the value of the COMMAND register doesn't matter once the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	 * device has been suspended, we can safely set it to 0 here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		pci_write_config_word(pci_dev, PCI_COMMAND, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) Fixup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	pci_fixup_device(pci_fixup_suspend_late, pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	 * If the target system sleep state is suspend-to-idle, it is sufficient
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	 * to check whether or not the device's wakeup settings are good for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	 * runtime PM.  Otherwise, the pm_resume_via_firmware() check will cause
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	 * pci_pm_complete() to take care of fixing up the device's state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	 * anyway, if need be.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	if (device_can_wakeup(dev) && !device_may_wakeup(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		dev->power.may_skip_resume = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) static int pci_pm_resume_noirq(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	struct pci_dev *pci_dev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	pci_power_t prev_state = pci_dev->current_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	bool skip_bus_pm = pci_dev->skip_bus_pm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	if (dev_pm_skip_resume(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	 * In the suspend-to-idle case, devices left in D0 during suspend will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	 * stay in D0, so it is not necessary to restore or update their
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	 * configuration here and attempting to put them into D0 again is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	 * pointless, so avoid doing that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	if (!(skip_bus_pm && pm_suspend_no_platform()))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		pci_pm_default_resume_early(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	pci_fixup_device(pci_fixup_resume_early, pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	pcie_pme_root_status_cleanup(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	if (!skip_bus_pm && prev_state == PCI_D3cold)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		pci_bridge_wait_for_secondary_bus(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	if (pci_has_legacy_pm_support(pci_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	if (pm && pm->resume_noirq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		return pm->resume_noirq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) static int pci_pm_resume_early(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	if (dev_pm_skip_resume(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	return pm_generic_resume_early(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) static int pci_pm_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	struct pci_dev *pci_dev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	 * This is necessary for the suspend error path in which resume is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	 * called without restoring the standard config registers of the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	if (pci_dev->state_saved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		pci_restore_standard_config(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	if (pci_has_legacy_pm_support(pci_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		return pci_legacy_resume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	pci_pm_default_resume(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	if (pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		if (pm->resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 			return pm->resume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 		pci_pm_reenable_device(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) #else /* !CONFIG_SUSPEND */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) #define pci_pm_suspend		NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) #define pci_pm_suspend_late	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) #define pci_pm_suspend_noirq	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) #define pci_pm_resume		NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) #define pci_pm_resume_early	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) #define pci_pm_resume_noirq	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) #endif /* !CONFIG_SUSPEND */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) #ifdef CONFIG_HIBERNATE_CALLBACKS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) static int pci_pm_freeze(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	struct pci_dev *pci_dev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	if (pci_has_legacy_pm_support(pci_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		return pci_legacy_suspend(dev, PMSG_FREEZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	if (!pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		pci_pm_default_suspend(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	 * Resume all runtime-suspended devices before creating a snapshot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	 * image of system memory, because the restore kernel generally cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	 * be expected to always handle them consistently and they need to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	 * put into the runtime-active metastate during system resume anyway,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	 * so it is better to ensure that the state saved in the image will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	 * always consistent with that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	pm_runtime_resume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	pci_dev->state_saved = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	if (pm->freeze) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		error = pm->freeze(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		suspend_report_result(pm->freeze, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 			return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) static int pci_pm_freeze_noirq(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	struct pci_dev *pci_dev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	if (pci_has_legacy_pm_support(pci_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		return pci_legacy_suspend_late(dev, PMSG_FREEZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	if (pm && pm->freeze_noirq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		error = pm->freeze_noirq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		suspend_report_result(pm->freeze_noirq, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 			return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	if (!pci_dev->state_saved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		pci_save_state(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	pci_pm_set_unknown_state(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) static int pci_pm_thaw_noirq(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	struct pci_dev *pci_dev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	 * The pm->thaw_noirq() callback assumes the device has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	 * returned to D0 and its config state has been restored.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	 * In addition, pci_restore_state() restores MSI-X state in MMIO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	 * space, which requires the device to be in D0, so return it to D0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	 * in case the driver's "freeze" callbacks put it into a low-power
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	 * state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	pci_set_power_state(pci_dev, PCI_D0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	pci_restore_state(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	if (pci_has_legacy_pm_support(pci_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	if (pm && pm->thaw_noirq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		return pm->thaw_noirq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) static int pci_pm_thaw(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	struct pci_dev *pci_dev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	if (pci_has_legacy_pm_support(pci_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		return pci_legacy_resume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	if (pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		if (pm->thaw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 			error = pm->thaw(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		pci_pm_reenable_device(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	pci_dev->state_saved = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) static int pci_pm_poweroff(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	struct pci_dev *pci_dev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	if (pci_has_legacy_pm_support(pci_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		return pci_legacy_suspend(dev, PMSG_HIBERNATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	if (!pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		pci_pm_default_suspend(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	/* The reason to do that is the same as in pci_pm_suspend(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	    pci_dev_need_resume(pci_dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		pm_runtime_resume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		pci_dev->state_saved = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		pci_dev_adjust_pme(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	if (pm->poweroff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 		int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		error = pm->poweroff(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		suspend_report_result(pm->poweroff, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 			return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) static int pci_pm_poweroff_late(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	if (dev_pm_skip_suspend(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	return pm_generic_poweroff_late(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) static int pci_pm_poweroff_noirq(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	struct pci_dev *pci_dev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	if (dev_pm_skip_suspend(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	if (pci_has_legacy_pm_support(pci_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		return pci_legacy_suspend_late(dev, PMSG_HIBERNATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	if (!pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		pci_fixup_device(pci_fixup_suspend_late, pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	if (pm->poweroff_noirq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 		int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		error = pm->poweroff_noirq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		suspend_report_result(pm->poweroff_noirq, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 			return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	if (!pci_dev->state_saved && !pci_has_subordinate(pci_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		pci_prepare_to_sleep(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	 * The reason for doing this here is the same as for the analogous code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	 * in pci_pm_suspend_noirq().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		pci_write_config_word(pci_dev, PCI_COMMAND, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	pci_fixup_device(pci_fixup_suspend_late, pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) static int pci_pm_restore_noirq(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	struct pci_dev *pci_dev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	pci_pm_default_resume_early(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	pci_fixup_device(pci_fixup_resume_early, pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	if (pci_has_legacy_pm_support(pci_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	if (pm && pm->restore_noirq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		return pm->restore_noirq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) static int pci_pm_restore(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	struct pci_dev *pci_dev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	 * This is necessary for the hibernation error path in which restore is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	 * called without restoring the standard config registers of the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	if (pci_dev->state_saved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		pci_restore_standard_config(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	if (pci_has_legacy_pm_support(pci_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 		return pci_legacy_resume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	pci_pm_default_resume(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	if (pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 		if (pm->restore)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 			return pm->restore(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		pci_pm_reenable_device(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) #else /* !CONFIG_HIBERNATE_CALLBACKS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) #define pci_pm_freeze		NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) #define pci_pm_freeze_noirq	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) #define pci_pm_thaw		NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) #define pci_pm_thaw_noirq	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) #define pci_pm_poweroff		NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) #define pci_pm_poweroff_late	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) #define pci_pm_poweroff_noirq	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) #define pci_pm_restore		NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) #define pci_pm_restore_noirq	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) #endif /* !CONFIG_HIBERNATE_CALLBACKS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) static int pci_pm_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	struct pci_dev *pci_dev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	pci_power_t prev = pci_dev->current_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	 * If pci_dev->driver is not set (unbound), we leave the device in D0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	 * but it may go to D3cold when the bridge above it runtime suspends.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	 * Save its config space in case that happens.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	if (!pci_dev->driver) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 		pci_save_state(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	pci_dev->state_saved = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	if (pm && pm->runtime_suspend) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 		error = pm->runtime_suspend(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		 * -EBUSY and -EAGAIN is used to request the runtime PM core
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 		 * to schedule a new suspend, so log the event only with debug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		 * log level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		if (error == -EBUSY || error == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 			pci_dbg(pci_dev, "can't suspend now (%ps returned %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 				pm->runtime_suspend, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 			return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		} else if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 			pci_err(pci_dev, "can't suspend (%ps returned %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 				pm->runtime_suspend, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 			return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	pci_fixup_device(pci_fixup_suspend, pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	if (pm && pm->runtime_suspend
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	    && !pci_dev->state_saved && pci_dev->current_state != PCI_D0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	    && pci_dev->current_state != PCI_UNKNOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 			      "PCI PM: State of device not saved by %pS\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 			      pm->runtime_suspend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	if (!pci_dev->state_saved) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 		pci_save_state(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		pci_finish_runtime_suspend(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) static int pci_pm_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	struct pci_dev *pci_dev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	pci_power_t prev_state = pci_dev->current_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	 * Restoring config space is necessary even if the device is not bound
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	 * to a driver because although we left it in D0, it may have gone to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	 * D3cold when the bridge above it runtime suspended.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	pci_restore_standard_config(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	if (!pci_dev->driver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	pci_fixup_device(pci_fixup_resume_early, pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	pci_pm_default_resume(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	if (prev_state == PCI_D3cold)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		pci_bridge_wait_for_secondary_bus(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	if (pm && pm->runtime_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		error = pm->runtime_resume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	pci_dev->runtime_d3cold = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) static int pci_pm_runtime_idle(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	struct pci_dev *pci_dev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	 * If pci_dev->driver is not set (unbound), the device should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	 * always remain in D0 regardless of the runtime PM status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	if (!pci_dev->driver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	if (!pm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		return -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	if (pm->runtime_idle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		return pm->runtime_idle(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) static const struct dev_pm_ops pci_dev_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	.prepare = pci_pm_prepare,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	.complete = pci_pm_complete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	.suspend = pci_pm_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	.suspend_late = pci_pm_suspend_late,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	.resume = pci_pm_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	.resume_early = pci_pm_resume_early,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	.freeze = pci_pm_freeze,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	.thaw = pci_pm_thaw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	.poweroff = pci_pm_poweroff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	.poweroff_late = pci_pm_poweroff_late,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	.restore = pci_pm_restore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	.suspend_noirq = pci_pm_suspend_noirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	.resume_noirq = pci_pm_resume_noirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	.freeze_noirq = pci_pm_freeze_noirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	.thaw_noirq = pci_pm_thaw_noirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	.poweroff_noirq = pci_pm_poweroff_noirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	.restore_noirq = pci_pm_restore_noirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	.runtime_suspend = pci_pm_runtime_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	.runtime_resume = pci_pm_runtime_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	.runtime_idle = pci_pm_runtime_idle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) #define PCI_PM_OPS_PTR	(&pci_dev_pm_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) #else /* !CONFIG_PM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) #define pci_pm_runtime_suspend	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) #define pci_pm_runtime_resume	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) #define pci_pm_runtime_idle	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) #define PCI_PM_OPS_PTR	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) #endif /* !CONFIG_PM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)  * __pci_register_driver - register a new pci driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)  * @drv: the driver structure to register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)  * @owner: owner module of drv
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)  * @mod_name: module name string
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)  * Adds the driver structure to the list of registered drivers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)  * Returns a negative value on error, otherwise 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)  * If no error occurred, the driver remains registered even if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)  * no device was claimed during registration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) int __pci_register_driver(struct pci_driver *drv, struct module *owner,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 			  const char *mod_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	/* initialize common driver fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	drv->driver.name = drv->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	drv->driver.bus = &pci_bus_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	drv->driver.owner = owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	drv->driver.mod_name = mod_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	drv->driver.groups = drv->groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	spin_lock_init(&drv->dynids.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	INIT_LIST_HEAD(&drv->dynids.list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	/* register with core */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	return driver_register(&drv->driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) EXPORT_SYMBOL(__pci_register_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)  * pci_unregister_driver - unregister a pci driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)  * @drv: the driver structure to unregister
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)  * Deletes the driver structure from the list of registered PCI drivers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)  * gives it a chance to clean up by calling its remove() function for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)  * each device it was responsible for, and marks those devices as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)  * driverless.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) void pci_unregister_driver(struct pci_driver *drv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	driver_unregister(&drv->driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	pci_free_dynids(drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) EXPORT_SYMBOL(pci_unregister_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) static struct pci_driver pci_compat_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	.name = "compat"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)  * pci_dev_driver - get the pci_driver of a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)  * @dev: the device to query
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)  * Returns the appropriate pci_driver structure or %NULL if there is no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)  * registered driver for the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) struct pci_driver *pci_dev_driver(const struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	if (dev->driver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 		return dev->driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 		for (i = 0; i <= PCI_ROM_RESOURCE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 			if (dev->resource[i].flags & IORESOURCE_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 				return &pci_compat_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) EXPORT_SYMBOL(pci_dev_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)  * pci_bus_match - Tell if a PCI device structure has a matching PCI device id structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)  * @dev: the PCI device structure to match against
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)  * @drv: the device driver to search for matching PCI device id structures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)  * Used by a driver to check whether a PCI device present in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)  * system is in its list of supported devices. Returns the matching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)  * pci_device_id structure or %NULL if there is no match.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) static int pci_bus_match(struct device *dev, struct device_driver *drv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	struct pci_dev *pci_dev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	struct pci_driver *pci_drv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	const struct pci_device_id *found_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	if (!pci_dev->match_driver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	pci_drv = to_pci_driver(drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	found_id = pci_match_device(pci_drv, pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	if (found_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)  * pci_dev_get - increments the reference count of the pci device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)  * @dev: the device being referenced
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)  * Each live reference to a device should be refcounted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)  * Drivers for PCI devices should normally record such references in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)  * their probe() methods, when they bind to a device, and release
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)  * them by calling pci_dev_put(), in their disconnect() methods.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)  * A pointer to the device with the incremented reference counter is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) struct pci_dev *pci_dev_get(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	if (dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 		get_device(&dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	return dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) EXPORT_SYMBOL(pci_dev_get);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)  * pci_dev_put - release a use of the pci device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)  * @dev: device that's been disconnected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)  * Must be called when a user of a device is finished with it.  When the last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)  * user of the device calls this function, the memory of the device is freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) void pci_dev_put(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	if (dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 		put_device(&dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) EXPORT_SYMBOL(pci_dev_put);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	struct pci_dev *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	pdev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	if (add_uevent_var(env, "PCI_CLASS=%04X", pdev->class))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	if (add_uevent_var(env, "PCI_ID=%04X:%04X", pdev->vendor, pdev->device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	if (add_uevent_var(env, "PCI_SUBSYS_ID=%04X:%04X", pdev->subsystem_vendor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 			   pdev->subsystem_device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 			   pdev->vendor, pdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 			   pdev->subsystem_vendor, pdev->subsystem_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 			   (u8)(pdev->class >> 16), (u8)(pdev->class >> 8),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 			   (u8)(pdev->class)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) #if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)  * pci_uevent_ers - emit a uevent during recovery path of PCI device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)  * @pdev: PCI device undergoing error recovery
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)  * @err_type: type of error event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	int idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	char *envp[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	switch (err_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	case PCI_ERS_RESULT_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	case PCI_ERS_RESULT_CAN_RECOVER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 		envp[idx++] = "ERROR_EVENT=BEGIN_RECOVERY";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 		envp[idx++] = "DEVICE_ONLINE=0";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	case PCI_ERS_RESULT_RECOVERED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 		envp[idx++] = "ERROR_EVENT=SUCCESSFUL_RECOVERY";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 		envp[idx++] = "DEVICE_ONLINE=1";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	case PCI_ERS_RESULT_DISCONNECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 		envp[idx++] = "ERROR_EVENT=FAILED_RECOVERY";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 		envp[idx++] = "DEVICE_ONLINE=0";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	if (idx > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 		envp[idx++] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 		kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, envp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) static int pci_bus_num_vf(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	return pci_num_vf(to_pci_dev(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)  * pci_dma_configure - Setup DMA configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)  * @dev: ptr to dev structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)  * Function to update PCI devices's DMA configuration using the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)  * info from the OF node or ACPI node of host bridge's parent (if any).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) static int pci_dma_configure(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	struct device *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	bridge = pci_get_host_bridge_device(to_pci_dev(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	if (IS_ENABLED(CONFIG_OF) && bridge->parent &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	    bridge->parent->of_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 		ret = of_dma_configure(dev, bridge->parent->of_node, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	} else if (has_acpi_companion(bridge)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 		struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 		ret = acpi_dma_configure(dev, acpi_get_dma_attr(adev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	pci_put_host_bridge_device(bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) struct bus_type pci_bus_type = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	.name		= "pci",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	.match		= pci_bus_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	.uevent		= pci_uevent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	.probe		= pci_device_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	.remove		= pci_device_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	.shutdown	= pci_device_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	.dev_groups	= pci_dev_groups,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	.bus_groups	= pci_bus_groups,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	.drv_groups	= pci_drv_groups,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	.pm		= PCI_PM_OPS_PTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	.num_vf		= pci_bus_num_vf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	.dma_configure	= pci_dma_configure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) EXPORT_SYMBOL(pci_bus_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) #ifdef CONFIG_PCIEPORTBUS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	struct pcie_device *pciedev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	struct pcie_port_service_driver *driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	pciedev = to_pcie_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	driver = to_service_driver(drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	if (driver->service != pciedev->service)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	if (driver->port_type != PCIE_ANY_PORT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	    driver->port_type != pci_pcie_type(pciedev->port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) struct bus_type pcie_port_bus_type = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	.name		= "pci_express",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	.match		= pcie_port_bus_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) EXPORT_SYMBOL_GPL(pcie_port_bus_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) static int __init pci_driver_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	ret = bus_register(&pci_bus_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) #ifdef CONFIG_PCIEPORTBUS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	ret = bus_register(&pcie_port_bus_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	dma_debug_add_bus(&pci_bus_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) postcore_initcall(pci_driver_init);