Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * PCIe Native PME support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright (C) 2007 - 2009 Intel Corp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * Copyright (C) 2007 - 2009 Shaohua Li <shaohua.li@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #define dev_fmt(fmt) "PME: " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <linux/pm_runtime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include "../pci.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include "portdrv.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 
/*
 * If this switch is set, MSI will not be used for PCIe PME signaling.  This
 * causes the PCIe port driver to use INTx interrupts only, but it turns out
 * that using MSI for PCIe PME signaling doesn't play well with PCIe PME-based
 * wake-up from system sleep states.
 */
bool pcie_pme_msi_disabled;	/* Set via the "pcie_pme=nomsi" boot parameter below. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) static int __init pcie_pme_setup(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 	if (!strncmp(str, "nomsi", 5))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 		pcie_pme_msi_disabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) __setup("pcie_pme=", pcie_pme_setup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 
/* Per-port state of the PCIe PME service. */
struct pcie_pme_service_data {
	spinlock_t lock;		/* Serializes the IRQ handler and the work item. */
	struct pcie_device *srv;	/* The PME service device this state belongs to. */
	struct work_struct work;	/* Deferred PME handling (pcie_pme_work_fn). */
	bool noirq; /* If set, keep the PME interrupt disabled. */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49)  * pcie_pme_interrupt_enable - Enable/disable PCIe PME interrupt generation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50)  * @dev: PCIe root port or event collector.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51)  * @enable: Enable or disable the interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 	if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 		pcie_capability_set_word(dev, PCI_EXP_RTCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 					 PCI_EXP_RTCTL_PMEIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 		pcie_capability_clear_word(dev, PCI_EXP_RTCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 					   PCI_EXP_RTCTL_PMEIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64)  * pcie_pme_walk_bus - Scan a PCI bus for devices asserting PME#.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65)  * @bus: PCI bus to scan.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67)  * Scan given PCI bus and all buses under it for devices asserting PME#.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) static bool pcie_pme_walk_bus(struct pci_bus *bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 	struct pci_dev *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 	bool ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 	list_for_each_entry(dev, &bus->devices, bus_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 		/* Skip PCIe devices in case we started from a root port. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 		if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 			if (dev->pme_poll)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 				dev->pme_poll = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 			pci_wakeup_event(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 			pm_request_resume(&dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 			ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 		if (dev->subordinate && pcie_pme_walk_bus(dev->subordinate))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 			ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93)  * pcie_pme_from_pci_bridge - Check if PCIe-PCI bridge generated a PME.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94)  * @bus: Secondary bus of the bridge.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95)  * @devfn: Device/function number to check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97)  * PME from PCI devices under a PCIe-PCI bridge may be converted to an in-band
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98)  * PCIe PME message.  In such that case the bridge should use the Requester ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99)  * of device/function number 0 on its secondary bus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) static bool pcie_pme_from_pci_bridge(struct pci_bus *bus, u8 devfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 	struct pci_dev *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	bool found = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 	if (devfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 	dev = pci_dev_get(bus->self);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 	if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 	if (pci_is_pcie(dev) && pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 		down_read(&pci_bus_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 		if (pcie_pme_walk_bus(bus))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 			found = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 		up_read(&pci_bus_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 	pci_dev_put(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 	return found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 
/**
 * pcie_pme_handle_request - Find device that generated PME and handle it.
 * @port: Root port or event collector that generated the PME interrupt.
 * @req_id: PCIe Requester ID of the device that generated the PME.
 *
 * Decode @req_id into bus/devfn, locate the corresponding device, and wake
 * it up.  Tries, in order: the root port itself, a PCIe-PCI bridge on the
 * source bus, and finally each device on the source bus.  Logs a message if
 * no PME source can be found.
 */
static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id)
{
	u8 busnr = req_id >> 8, devfn = req_id & 0xff;
	struct pci_bus *bus;
	struct pci_dev *dev;
	bool found = false;

	/* First, check if the PME is from the root port itself. */
	if (port->devfn == devfn && port->bus->number == busnr) {
		if (port->pme_poll)
			port->pme_poll = false;

		if (pci_check_pme_status(port)) {
			pm_request_resume(&port->dev);
			found = true;
		} else {
			/*
			 * Apparently, the root port generated the PME on behalf
			 * of a non-PCIe device downstream.  If this is done by
			 * a root port, the Requester ID field in its status
			 * register may contain either the root port's, or the
			 * source device's information (PCI Express Base
			 * Specification, Rev. 2.0, Section 6.1.9).
			 */
			down_read(&pci_bus_sem);
			found = pcie_pme_walk_bus(port->subordinate);
			up_read(&pci_bus_sem);
		}
		goto out;
	}

	/* Second, find the bus the source device is on. */
	bus = pci_find_bus(pci_domain_nr(port->bus), busnr);
	if (!bus)
		goto out;

	/* Next, check if the PME is from a PCIe-PCI bridge. */
	found = pcie_pme_from_pci_bridge(bus, devfn);
	if (found)
		goto out;

	/* Finally, try to find the PME source on the bus. */
	down_read(&pci_bus_sem);
	list_for_each_entry(dev, &bus->devices, bus_list) {
		/*
		 * Take a reference so that @dev stays valid after pci_bus_sem
		 * is released below; it is dropped either at the bottom of the
		 * loop (no match) or after the device has been handled.
		 */
		pci_dev_get(dev);
		if (dev->devfn == devfn) {
			found = true;
			break;
		}
		pci_dev_put(dev);
	}
	up_read(&pci_bus_sem);

	if (found) {
		/* The device is there, but we have to check its PME status. */
		found = pci_check_pme_status(dev);
		if (found) {
			if (dev->pme_poll)
				dev->pme_poll = false;

			pci_wakeup_event(dev);
			pm_request_resume(&dev->dev);
		}
		/* Drop the reference taken in the search loop above. */
		pci_dev_put(dev);
	} else if (devfn) {
		/*
		 * The device is not there, but we can still try to recover by
		 * assuming that the PME was reported by a PCIe-PCI bridge that
		 * used devfn different from zero.
		 */
		pci_info(port, "interrupt generated for non-existent device %02x:%02x.%d\n",
			 busnr, PCI_SLOT(devfn), PCI_FUNC(devfn));
		found = pcie_pme_from_pci_bridge(bus, 0);
	}

 out:
	if (!found)
		pci_info(port, "Spurious native interrupt!\n");
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 
/**
 * pcie_pme_work_fn - Work handler for PCIe PME interrupt.
 * @work: Work structure giving access to service data.
 *
 * Drain all pending PMEs from the port's Root Status register, handling each
 * one, and re-enable the PME interrupt (which pcie_pme_irq() disabled) when
 * done.  The lock is dropped around the actual request handling and while
 * waiting for a pending PME to latch.
 */
static void pcie_pme_work_fn(struct work_struct *work)
{
	struct pcie_pme_service_data *data =
			container_of(work, struct pcie_pme_service_data, work);
	struct pci_dev *port = data->srv->port;
	u32 rtsta;

	spin_lock_irq(&data->lock);

	for (;;) {
		/* Suspend/removal asked us to keep the interrupt off. */
		if (data->noirq)
			break;

		pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
		/* All-ones means the config read failed (device gone). */
		if (rtsta == (u32) ~0)
			break;

		if (rtsta & PCI_EXP_RTSTA_PME) {
			/*
			 * Clear PME status of the port.  If there are other
			 * pending PMEs, the status will be set again.
			 */
			pcie_clear_root_pme_status(port);

			/* Handling may sleep/scan buses, so drop the lock. */
			spin_unlock_irq(&data->lock);
			pcie_pme_handle_request(port, rtsta & 0xffff);
			spin_lock_irq(&data->lock);

			continue;
		}

		/* No need to loop if there are no more PMEs pending. */
		if (!(rtsta & PCI_EXP_RTSTA_PENDING))
			break;

		/* A PME is pending but not yet latched; wait briefly. */
		spin_unlock_irq(&data->lock);
		cpu_relax();
		spin_lock_irq(&data->lock);
	}

	/* Re-arm the interrupt unless we were told to keep it off. */
	if (!data->noirq)
		pcie_pme_interrupt_enable(port, true);

	spin_unlock_irq(&data->lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260)  * pcie_pme_irq - Interrupt handler for PCIe root port PME interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261)  * @irq: Interrupt vector.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262)  * @context: Interrupt context pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) static irqreturn_t pcie_pme_irq(int irq, void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 	struct pci_dev *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 	struct pcie_pme_service_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 	u32 rtsta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 	port = ((struct pcie_device *)context)->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 	data = get_service_data((struct pcie_device *)context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 	spin_lock_irqsave(&data->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) 	pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 	if (rtsta == (u32) ~0 || !(rtsta & PCI_EXP_RTSTA_PME)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 		spin_unlock_irqrestore(&data->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 	pcie_pme_interrupt_enable(port, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 	spin_unlock_irqrestore(&data->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 	/* We don't use pm_wq, because it's freezable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 	schedule_work(&data->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 
/**
 * pcie_pme_can_wakeup - Set the wakeup capability flag.
 * @dev: PCI device to handle.
 * @ign: Ignored.
 *
 * Callback for pci_walk_bus(); always returns 0 so the walk continues.
 */
static int pcie_pme_can_wakeup(struct pci_dev *dev, void *ign)
{
	device_set_wakeup_capable(&dev->dev, true);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303)  * pcie_pme_mark_devices - Set the wakeup flag for devices below a port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304)  * @port: PCIe root port or event collector to handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306)  * For each device below given root port, including the port itself (or for each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307)  * root complex integrated endpoint if @port is a root complex event collector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308)  * set the flag indicating that it can signal run-time wake-up events.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) static void pcie_pme_mark_devices(struct pci_dev *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 	pcie_pme_can_wakeup(port, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 	if (port->subordinate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 		pci_walk_bus(port->subordinate, pcie_pme_can_wakeup, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 
/**
 * pcie_pme_probe - Initialize PCIe PME service for given root port.
 * @srv: PCIe service to initialize.
 *
 * Allocates per-port state, masks and clears any stale PME status, installs
 * the interrupt handler, marks downstream devices as wakeup-capable, and
 * finally enables PME interrupt generation.  Returns 0 on success or a
 * negative errno.
 */
static int pcie_pme_probe(struct pcie_device *srv)
{
	struct pci_dev *port;
	struct pcie_pme_service_data *data;
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	spin_lock_init(&data->lock);
	INIT_WORK(&data->work, pcie_pme_work_fn);
	data->srv = srv;
	set_service_data(srv, data);

	/* Keep the interrupt off and clear stale status until we're ready. */
	port = srv->port;
	pcie_pme_interrupt_enable(port, false);
	pcie_clear_root_pme_status(port);

	ret = request_irq(srv->irq, pcie_pme_irq, IRQF_SHARED, "PCIe PME", srv);
	if (ret) {
		kfree(data);
		return ret;
	}

	pci_info(port, "Signaling with IRQ %d\n", srv->irq);

	pcie_pme_mark_devices(port);
	pcie_pme_interrupt_enable(port, true);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) static bool pcie_pme_check_wakeup(struct pci_bus *bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) 	struct pci_dev *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 	if (!bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) 	list_for_each_entry(dev, &bus->devices, bus_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) 		if (device_may_wakeup(&dev->dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 		    || pcie_pme_check_wakeup(dev->subordinate))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) 
/*
 * Mask the port's PME interrupt, discard any latched PME status, and flag
 * the service so that pcie_pme_work_fn() will not re-enable the interrupt.
 * All three steps are done under the lock so the work item sees them
 * atomically.
 */
static void pcie_pme_disable_interrupt(struct pci_dev *port,
				       struct pcie_pme_service_data *data)
{
	spin_lock_irq(&data->lock);
	pcie_pme_interrupt_enable(port, false);
	pcie_clear_root_pme_status(port);
	data->noirq = true;
	spin_unlock_irq(&data->lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379)  * pcie_pme_suspend - Suspend PCIe PME service device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380)  * @srv: PCIe service device to suspend.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) static int pcie_pme_suspend(struct pcie_device *srv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 	struct pcie_pme_service_data *data = get_service_data(srv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 	struct pci_dev *port = srv->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) 	bool wakeup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) 	if (device_may_wakeup(&port->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) 		wakeup = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) 		down_read(&pci_bus_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) 		wakeup = pcie_pme_check_wakeup(port->subordinate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) 		up_read(&pci_bus_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 	if (wakeup) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 		ret = enable_irq_wake(srv->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 		if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 	pcie_pme_disable_interrupt(port, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) 	synchronize_irq(srv->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410)  * pcie_pme_resume - Resume PCIe PME service device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411)  * @srv: PCIe service device to resume.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) static int pcie_pme_resume(struct pcie_device *srv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) 	struct pcie_pme_service_data *data = get_service_data(srv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) 	spin_lock_irq(&data->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) 	if (data->noirq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) 		struct pci_dev *port = srv->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 		pcie_clear_root_pme_status(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) 		pcie_pme_interrupt_enable(port, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 		data->noirq = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) 		disable_irq_wake(srv->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) 	spin_unlock_irq(&data->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) 
/**
 * pcie_pme_remove - Prepare PCIe PME service device for removal.
 * @srv: PCIe service device to remove.
 *
 * Teardown order matters: mask the interrupt first so no new work is
 * scheduled, release the IRQ, wait for any in-flight work item to finish,
 * and only then free the service data.
 */
static void pcie_pme_remove(struct pcie_device *srv)
{
	struct pcie_pme_service_data *data = get_service_data(srv);

	pcie_pme_disable_interrupt(srv->port, data);
	free_irq(srv->irq, srv);
	cancel_work_sync(&data->work);
	kfree(data);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 
/* PME service driver registered with the PCIe port driver core. */
static struct pcie_port_service_driver pcie_pme_driver = {
	.name		= "pcie_pme",
	.port_type	= PCI_EXP_TYPE_ROOT_PORT,	/* Root ports only. */
	.service	= PCIE_PORT_SERVICE_PME,

	.probe		= pcie_pme_probe,
	.suspend	= pcie_pme_suspend,
	.resume		= pcie_pme_resume,
	.remove		= pcie_pme_remove,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 
/**
 * pcie_pme_init - Register the PCIe PME service driver.
 *
 * Called once at boot from the PCIe port driver initialization.
 */
int __init pcie_pme_init(void)
{
	return pcie_port_service_register(&pcie_pme_driver);
}