Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * Copyright (c) Microsoft Corporation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Author:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  *   Jake Oshins <jakeo@microsoft.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  * This driver acts as a paravirtual front-end for PCI Express root buses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  * When a PCI Express function (either an entire device or an SR-IOV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  * Virtual Function) is being passed through to the VM, this driver exposes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  * a new bus to the guest VM.  This is modeled as a root PCI bus because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  * no bridges are being exposed to the VM.  In fact, with a "Generation 2"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  * VM within Hyper-V, there may seem to be no PCI bus at all in the VM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  * until a device has been exposed using this driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16)  * Each root PCI bus has its own PCI domain, which is called "Segment" in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17)  * the PCI Firmware Specifications.  Thus while each device passed through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18)  * to the VM using this front-end will appear at "device 0", the domain will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19)  * be unique.  Typically, each bus will have one PCI function on it, though
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20)  * this driver does support more than one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22)  * In order to map the interrupts from the device through to the guest VM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23)  * this driver also implements an IRQ Domain, which handles interrupts (either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24)  * MSI or MSI-X) associated with the functions on the bus.  As interrupts are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25)  * set up, torn down, or reaffined, this driver communicates with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26)  * underlying hypervisor to adjust the mappings in the I/O MMU so that each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27)  * interrupt will be delivered to the correct virtual processor at the right
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28)  * vector.  This driver does not support level-triggered (line-based)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29)  * interrupts, and will report that the Interrupt Line register in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30)  * function's configuration space is zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32)  * The rest of this driver mostly maps PCI concepts onto underlying Hyper-V
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33)  * facilities.  For instance, the configuration space of a function exposed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34)  * by Hyper-V is mapped into a single page of memory space, and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35)  * read and write handlers for config space must be aware of this mechanism.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36)  * Similarly, device setup and teardown involves messages sent to and from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37)  * the PCI back-end driver in Hyper-V.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) #include <linux/semaphore.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) #include <linux/irqdomain.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) #include <asm/irqdomain.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) #include <asm/apic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) #include <linux/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) #include <linux/msi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) #include <linux/hyperv.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) #include <linux/refcount.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) #include <asm/mshyperv.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55)  * Protocol versions. The low word is the minor version, the high word the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56)  * major version.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) #define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) #define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) #define PCI_MINOR_VERSION(version) ((u32)(version) & 0xff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) enum pci_protocol_version_t {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 	PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1),	/* Win10 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 	PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2),	/* RS1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) 	PCI_PROTOCOL_VERSION_1_3 = PCI_MAKE_VERSION(1, 3),	/* Vibranium */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) #define CPU_AFFINITY_ALL	-1ULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 
/*
 * Supported protocol versions in the order of probing - highest go
 * first.
 */
static enum pci_protocol_version_t pci_protocol_versions[] = {
	PCI_PROTOCOL_VERSION_1_3,
	PCI_PROTOCOL_VERSION_1_2,
	PCI_PROTOCOL_VERSION_1_1,
};

/*
 * Layout of the per-function MMIO region the host exposes for config
 * access: an 8 KiB window whose second 4 KiB page holds the function's
 * configuration space (see the file header comment).
 */
#define PCI_CONFIG_MMIO_LENGTH	0x2000
#define CFG_PAGE_OFFSET 0x1000
#define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)

/* Upper bound on MSI/MSI-X messages (1024). */
#define MAX_SUPPORTED_MSI_MESSAGES 0x400

/* Windows NTSTATUS code the host returns when it rejects a protocol version. */
#define STATUS_REVISION_MISMATCH 0xC0000059

/* space for 32bit serial number as string (10 decimal digits + NUL) */
#define SLOT_NAME_SIZE 11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 
/*
 * Message Types
 *
 * These identifiers are part of the wire protocol shared with the
 * Hyper-V host; the numeric values must not be changed or reordered.
 */

enum pci_message_type {
	/*
	 * Version 1.1
	 */
	PCI_MESSAGE_BASE                = 0x42490000,
	PCI_BUS_RELATIONS               = PCI_MESSAGE_BASE + 0,
	PCI_QUERY_BUS_RELATIONS         = PCI_MESSAGE_BASE + 1,
	PCI_POWER_STATE_CHANGE          = PCI_MESSAGE_BASE + 4,
	PCI_QUERY_RESOURCE_REQUIREMENTS = PCI_MESSAGE_BASE + 5,
	PCI_QUERY_RESOURCE_RESOURCES    = PCI_MESSAGE_BASE + 6,
	PCI_BUS_D0ENTRY                 = PCI_MESSAGE_BASE + 7,
	PCI_BUS_D0EXIT                  = PCI_MESSAGE_BASE + 8,
	PCI_READ_BLOCK                  = PCI_MESSAGE_BASE + 9,
	PCI_WRITE_BLOCK                 = PCI_MESSAGE_BASE + 0xA,
	PCI_EJECT                       = PCI_MESSAGE_BASE + 0xB,
	PCI_QUERY_STOP                  = PCI_MESSAGE_BASE + 0xC,
	PCI_REENABLE                    = PCI_MESSAGE_BASE + 0xD,
	PCI_QUERY_STOP_FAILED           = PCI_MESSAGE_BASE + 0xE,
	PCI_EJECTION_COMPLETE           = PCI_MESSAGE_BASE + 0xF,
	PCI_RESOURCES_ASSIGNED          = PCI_MESSAGE_BASE + 0x10,
	PCI_RESOURCES_RELEASED          = PCI_MESSAGE_BASE + 0x11,
	PCI_INVALIDATE_BLOCK            = PCI_MESSAGE_BASE + 0x12,
	PCI_QUERY_PROTOCOL_VERSION      = PCI_MESSAGE_BASE + 0x13,
	PCI_CREATE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x14,
	PCI_DELETE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x15,
	/* Added in later protocol versions (see pci_protocol_version_t). */
	PCI_RESOURCES_ASSIGNED2		= PCI_MESSAGE_BASE + 0x16,
	PCI_CREATE_INTERRUPT_MESSAGE2	= PCI_MESSAGE_BASE + 0x17,
	PCI_DELETE_INTERRUPT_MESSAGE2	= PCI_MESSAGE_BASE + 0x18, /* unused */
	PCI_BUS_RELATIONS2		= PCI_MESSAGE_BASE + 0x19,
	PCI_MESSAGE_MAXIMUM
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) 
/*
 * Structures defining the virtual PCI Express protocol.
 */

/*
 * Protocol version as carried on the wire: the same 16/16 split that
 * PCI_MAKE_VERSION() produces (minor in the low word, major in the high).
 */
union pci_version {
	struct {
		u16 minor_version;
		u16 major_version;
	} parts;
	u32 version;
} __packed;

/*
 * Function numbers are 8-bits wide on Express, as interpreted through ARI,
 * which is all this driver does.  This representation is the one used in
 * Windows, which is what is expected when sending this back and forth with
 * the Hyper-V parent partition.
 */
union win_slot_encoding {
	struct {
		u32	dev:5;	/* device number */
		u32	func:3;	/* function number */
		u32	reserved:24;
	} bits;
	u32 slot;
} __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) 
/*
 * Pretty much as defined in the PCI Specifications.
 * Sent by the host in PCI_BUS_RELATIONS to describe one exposed function.
 */
struct pci_function_description {
	u16	v_id;	/* vendor ID */
	u16	d_id;	/* device ID */
	u8	rev;	/* revision ID */
	u8	prog_intf;	/* programming interface */
	u8	subclass;
	u8	base_class;
	u32	subsystem_id;
	union win_slot_encoding win_slot;
	u32	ser;	/* serial number */
} __packed;

/*
 * Values for pci_function_description2.flags.
 * NOTE(review): NUMA_AFFINITY presumably indicates virtual_numa_node is
 * valid — confirm against the consumer of these flags.
 */
enum pci_device_description_flags {
	HV_PCI_DEVICE_FLAG_NONE			= 0x0,
	HV_PCI_DEVICE_FLAG_NUMA_AFFINITY	= 0x1,
};

/*
 * Extended function description used by PCI_BUS_RELATIONS2; adds flags
 * and the function's virtual NUMA node to pci_function_description.
 */
struct pci_function_description2 {
	u16	v_id;	/* vendor ID */
	u16	d_id;	/* device ID */
	u8	rev;	/* revision ID */
	u8	prog_intf;	/* programming interface */
	u8	subclass;
	u8	base_class;
	u32	subsystem_id;
	union	win_slot_encoding win_slot;
	u32	ser;	/* serial number */
	u32	flags;	/* enum pci_device_description_flags */
	u16	virtual_numa_node;
	u16	reserved;
} __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 
/**
 * struct hv_msi_desc - interrupt descriptor, protocol 1.1 wire layout
 * @vector:		IDT entry
 * @delivery_mode:	As defined in Intel's Programmer's
 *			Reference Manual, Volume 3, Chapter 8.
 * @vector_count:	Number of contiguous entries in the
 *			Interrupt Descriptor Table that are
 *			occupied by this Message-Signaled
 *			Interrupt. For "MSI", as first defined
 *			in PCI 2.2, this can be between 1 and
 *			32. For "MSI-X," as first defined in PCI
 *			3.0, this must be 1, as each MSI-X table
 *			entry would have its own descriptor.
 * @reserved:		Empty space
 * @cpu_mask:		All the target virtual processors
 *			(CPU_AFFINITY_ALL targets every one).
 */
struct hv_msi_desc {
	u8	vector;
	u8	delivery_mode;
	u16	vector_count;
	u32	reserved;
	u64	cpu_mask;
} __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 
/**
 * struct hv_msi_desc2 - 1.2 version of hv_msi_desc
 * @vector:		IDT entry
 * @delivery_mode:	As defined in Intel's Programmer's
 *			Reference Manual, Volume 3, Chapter 8.
 * @vector_count:	Number of contiguous entries in the
 *			Interrupt Descriptor Table that are
 *			occupied by this Message-Signaled
 *			Interrupt. For "MSI", as first defined
 *			in PCI 2.2, this can be between 1 and
 *			32. For "MSI-X," as first defined in PCI
 *			3.0, this must be 1, as each MSI-X table
 *			entry would have its own descriptor.
 * @processor_count:	number of bits enabled in array.
 * @processor_array:	All the target virtual processors.
 *			Replaces the 1.1 cpu_mask bitmap; limited
 *			to 32 explicit processor entries.
 */
struct hv_msi_desc2 {
	u8	vector;
	u8	delivery_mode;
	u16	vector_count;
	u16	processor_count;
	u16	processor_array[32];
} __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 
/**
 * struct tran_int_desc - interrupt as translated by the hypervisor
 * @reserved:		unused, padding
 * @vector_count:	same as in hv_msi_desc
 * @data:		This is the "data payload" value that is
 *			written by the device when it generates
 *			a message-signaled interrupt, either MSI
 *			or MSI-X.
 * @address:		This is the address to which the data
 *			payload is written on interrupt
 *			generation.
 *
 * Returned by the host in pci_create_int_response and echoed back in
 * pci_delete_interrupt when the interrupt is torn down.
 */
struct tran_int_desc {
	u16	reserved;
	u16	vector_count;
	u32	data;
	u64	address;
} __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 
/*
 * A generic message format for virtual PCI.
 * Specific message formats are defined later in the file.
 */

/* Type field (enum pci_message_type) leading every protocol message. */
struct pci_message {
	u32 type;
} __packed;

/* A message addressed to one child function, identified by wslot. */
struct pci_child_message {
	struct pci_message message_type;
	union win_slot_encoding wslot;
} __packed;

/* A message arriving from the host: VMBus packet header plus type. */
struct pci_incoming_message {
	struct vmpacket_descriptor hdr;
	struct pci_message message_type;
} __packed;

/* Generic host completion for a guest request. */
struct pci_response {
	struct vmpacket_descriptor hdr;
	s32 status;			/* negative values are failures */
} __packed;

/*
 * Guest-side bookkeeping wrapper for an outgoing request.  The actual
 * wire message follows in the flexible array; completion_func (if set)
 * is called with compl_ctxt and the host's response.
 */
struct pci_packet {
	void (*completion_func)(void *context, struct pci_response *resp,
				int resp_packet_size);
	void *compl_ctxt;

	struct pci_message message[];
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 
/*
 * Specific message types supporting the PCI protocol.
 */

/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts the version.
 *
 * pci_version: The protocol version requested.
 * is_last_attempt: If TRUE, this is the last version guest will request.
 * reservedz: Reserved field, set to zero.
 */

struct pci_version_request {
	struct pci_message message_type;
	u32 protocol_version;	/* one of enum pci_protocol_version_t */
} __packed;

/*
 * Bus D0 Entry.  This is sent from the guest to the host when the virtual
 * bus (PCI Express port) is ready for action.
 */

struct pci_bus_d0_entry {
	struct pci_message message_type;
	u32 reserved;
	u64 mmio_base;		/* base of the bus's config MMIO window */
} __packed;

/*
 * PCI_BUS_RELATIONS: host-to-guest enumeration of the device_count
 * functions currently present on the bus.
 */
struct pci_bus_relations {
	struct pci_incoming_message incoming;
	u32 device_count;
	struct pci_function_description func[];
} __packed;

/*
 * PCI_BUS_RELATIONS2: as above but carrying the extended
 * pci_function_description2 (flags and virtual NUMA node).
 */
struct pci_bus_relations2 {
	struct pci_incoming_message incoming;
	u32 device_count;
	struct pci_function_description2 func[];
} __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) 
/* Response to PCI_QUERY_RESOURCE_REQUIREMENTS: one probed value per BAR. */
struct pci_q_res_req_response {
	struct vmpacket_descriptor hdr;
	s32 status;			/* negative values are failures */
	u32 probed_bar[PCI_STD_NUM_BARS];
} __packed;

/* Guest-to-host PCI_POWER_STATE_CHANGE request for one function. */
struct pci_set_power {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u32 power_state;		/* In Windows terms */
	u32 reserved;
} __packed;

/* Host completion for pci_set_power, reporting the state reached. */
struct pci_set_power_response {
	struct vmpacket_descriptor hdr;
	s32 status;			/* negative values are failures */
	union win_slot_encoding wslot;
	u32 resultant_state;		/* In Windows terms */
	u32 reserved;
} __packed;

/* PCI_RESOURCES_ASSIGNED message body (original protocol layout). */
struct pci_resources_assigned {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u8 memory_range[0x14][6];	/* not used here */
	u32 msi_descriptors;
	u32 reserved[4];
} __packed;

/* PCI_RESOURCES_ASSIGNED2 message body (later protocol versions). */
struct pci_resources_assigned2 {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u8 memory_range[0x14][6];	/* not used here */
	u32 msi_descriptor_count;
	u8 reserved[70];
} __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 
/* Guest request to create an MSI/MSI-X interrupt (1.1 descriptor form). */
struct pci_create_interrupt {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct hv_msi_desc int_desc;
} __packed;

/* Host response carrying the hypervisor-translated interrupt descriptor. */
struct pci_create_int_response {
	struct pci_response response;
	u32 reserved;
	struct tran_int_desc int_desc;
} __packed;

/* As pci_create_interrupt, using the multi-processor hv_msi_desc2. */
struct pci_create_interrupt2 {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct hv_msi_desc2 int_desc;
} __packed;

/* Guest request to tear down a previously created interrupt. */
struct pci_delete_interrupt {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct tran_int_desc int_desc;
} __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393)  * Note: the VM must pass a valid block id, wslot and bytes_requested.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) struct pci_read_block {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 	struct pci_message message_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 	u32 block_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 	union win_slot_encoding wslot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 	u32 bytes_requested;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) } __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) struct pci_read_block_response {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 	struct vmpacket_descriptor hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 	u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 	u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) } __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409)  * Note: the VM must pass a valid block id, wslot and byte_count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) struct pci_write_block {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 	struct pci_message message_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 	u32 block_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 	union win_slot_encoding wslot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 	u32 byte_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 	u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) } __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) struct pci_dev_inval_block {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 	struct pci_incoming_message incoming;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 	union win_slot_encoding wslot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 	u64 block_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) } __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) struct pci_dev_incoming {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 	struct pci_incoming_message incoming;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 	union win_slot_encoding wslot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) } __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) struct pci_eject_response {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 	struct pci_message message_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 	union win_slot_encoding wslot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 	u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) } __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) static int pci_ring_size = (4 * PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439)  * Driver specific state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) enum hv_pcibus_state {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 	hv_pcibus_init = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 	hv_pcibus_probed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	hv_pcibus_installed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 	hv_pcibus_removing,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 	hv_pcibus_maximum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) struct hv_pcibus_device {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 	struct pci_sysdata sysdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	/* Protocol version negotiated with the host */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	enum pci_protocol_version_t protocol_version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 	enum hv_pcibus_state state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	refcount_t remove_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	struct hv_device *hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 	resource_size_t low_mmio_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 	resource_size_t high_mmio_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	struct resource *mem_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	struct resource *low_mmio_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	struct resource *high_mmio_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 	struct completion *survey_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	struct completion remove_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	struct pci_bus *pci_bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	spinlock_t config_lock;	/* Avoid two threads writing index page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	spinlock_t device_list_lock;	/* Protect lists below */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	void __iomem *cfg_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	struct list_head resources_for_children;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	struct list_head children;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	struct list_head dr_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	struct msi_domain_info msi_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	struct msi_controller msi_chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	struct irq_domain *irq_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	spinlock_t retarget_msi_interrupt_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	struct workqueue_struct *wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 	/* Highest slot of child device with resources allocated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	int wslot_res_allocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	/* hypercall arg, must not cross page boundary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	struct hv_retarget_device_interrupt retarget_msi_interrupt_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	 * Don't put anything here: retarget_msi_interrupt_params must be last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494)  * Tracks "Device Relations" messages from the host, which must be both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495)  * processed in order and deferred so that they don't run in the context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496)  * of the incoming packet callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) struct hv_dr_work {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	struct work_struct wrk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	struct hv_pcibus_device *bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) struct hv_pcidev_description {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	u16	v_id;	/* vendor ID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	u16	d_id;	/* device ID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	u8	rev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	u8	prog_intf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	u8	subclass;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	u8	base_class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	u32	subsystem_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	union	win_slot_encoding win_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	u32	ser;	/* serial number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	u32	flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	u16	virtual_numa_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) struct hv_dr_state {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	struct list_head list_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	u32 device_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 	struct hv_pcidev_description func[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) enum hv_pcichild_state {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	hv_pcichild_init = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	hv_pcichild_requirements,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	hv_pcichild_resourced,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	hv_pcichild_ejecting,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 	hv_pcichild_maximum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) struct hv_pci_dev {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	/* List protected by pci_rescan_remove_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	struct list_head list_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	refcount_t refs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	enum hv_pcichild_state state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	struct pci_slot *pci_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	struct hv_pcidev_description desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	bool reported_missing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	struct hv_pcibus_device *hbus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	struct work_struct wrk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 	void (*block_invalidate)(void *context, u64 block_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 	void *invalidate_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	 * What would be observed if one wrote 0xFFFFFFFF to a BAR and then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	 * read it back, for each of the BAR offsets within config space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	u32 probed_bar[PCI_STD_NUM_BARS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) struct hv_pci_compl {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 	struct completion host_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	s32 completion_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) static void hv_pci_onchannelcallback(void *context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560)  * hv_pci_generic_compl() - Invoked for a completion packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561)  * @context:		Set up by the sender of the packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562)  * @resp:		The response packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563)  * @resp_packet_size:	Size in bytes of the packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565)  * This function is used to trigger an event and report status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566)  * for any message for which the completion packet contains a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567)  * status and nothing else.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) static void hv_pci_generic_compl(void *context, struct pci_response *resp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 				 int resp_packet_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 	struct hv_pci_compl *comp_pkt = context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	if (resp_packet_size >= offsetofend(struct pci_response, status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 		comp_pkt->completion_status = resp->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 		comp_pkt->completion_status = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 	complete(&comp_pkt->host_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 						u32 wslot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) static void get_pcichild(struct hv_pci_dev *hpdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	refcount_inc(&hpdev->refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) static void put_pcichild(struct hv_pci_dev *hpdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	if (refcount_dec_and_test(&hpdev->refs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 		kfree(hpdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600)  * There is no good way to get notified from vmbus_onoffer_rescind(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601)  * so let's use polling here, since this is not a hot path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) static int wait_for_response(struct hv_device *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 			     struct completion *comp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 		if (hdev->channel->rescind) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 			dev_warn_once(&hdev->device, "The device is gone.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 			return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 		if (wait_for_completion_timeout(comp, HZ / 10))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620)  * devfn_to_wslot() - Convert from Linux PCI slot to Windows
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621)  * @devfn:	The Linux representation of PCI slot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623)  * Windows uses a slightly different representation of PCI slot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625)  * Return: The Windows representation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) static u32 devfn_to_wslot(int devfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	union win_slot_encoding wslot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	wslot.slot = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	wslot.bits.dev = PCI_SLOT(devfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	wslot.bits.func = PCI_FUNC(devfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	return wslot.slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639)  * wslot_to_devfn() - Convert from Windows PCI slot to Linux
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640)  * @wslot:	The Windows representation of PCI slot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642)  * Windows uses a slightly different representation of PCI slot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644)  * Return: The Linux representation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) static int wslot_to_devfn(u32 wslot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	union win_slot_encoding slot_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	slot_no.slot = wslot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	return PCI_DEVFN(slot_no.bits.dev, slot_no.bits.func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655)  * PCI Configuration Space for these root PCI buses is implemented as a pair
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656)  * of pages in memory-mapped I/O space.  Writing to the first page chooses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657)  * the PCI function being written or read.  Once the first page has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658)  * written to, the following page maps in the entire configuration space of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659)  * the function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663)  * _hv_pcifront_read_config() - Internal PCI config read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664)  * @hpdev:	The PCI driver's representation of the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665)  * @where:	Offset within config space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666)  * @size:	Size of the transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667)  * @val:	Pointer to the buffer receiving the data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 				     int size, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 	 * If the attempt is to read the IDs or the ROM BAR, simulate that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	if (where + size <= PCI_COMMAND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 		memcpy(val, ((u8 *)&hpdev->desc.v_id) + where, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	} else if (where >= PCI_CLASS_REVISION && where + size <=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 		   PCI_CACHE_LINE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 		memcpy(val, ((u8 *)&hpdev->desc.rev) + where -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 		       PCI_CLASS_REVISION, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	} else if (where >= PCI_SUBSYSTEM_VENDOR_ID && where + size <=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 		   PCI_ROM_ADDRESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 		memcpy(val, (u8 *)&hpdev->desc.subsystem_id + where -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 		       PCI_SUBSYSTEM_VENDOR_ID, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	} else if (where >= PCI_ROM_ADDRESS && where + size <=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 		   PCI_CAPABILITY_LIST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 		/* ROM BARs are unimplemented */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 		*val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	} else if (where >= PCI_INTERRUPT_LINE && where + size <=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 		   PCI_INTERRUPT_PIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 		 * Interrupt Line and Interrupt PIN are hard-wired to zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 		 * because this front-end only supports message-signaled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 		 * interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 		*val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	} else if (where + size <= CFG_PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 		spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 		/* Choose the function to be read. (See comment above) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 		writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 		/* Make sure the function was chosen before we start reading. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 		mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 		/* Read from that function's config space. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 		switch (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 		case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 			*val = readb(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 		case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 			*val = readw(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 			*val = readl(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 		 * Make sure the read was done before we release the spinlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 		 * allowing consecutive reads/writes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 		mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 		spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 		dev_err(&hpdev->hbus->hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 			"Attempt to read beyond a function's config space.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	u16 ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 			     PCI_VENDOR_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	/* Choose the function to be read. (See comment above) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	/* Make sure the function was chosen before we start reading. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	/* Read from that function's config space. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	ret = readw(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	 * mb() is not required here, because the spin_unlock_irqrestore()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	 * is a barrier.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756)  * _hv_pcifront_write_config() - Internal PCI config write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757)  * @hpdev:	The PCI driver's representation of the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758)  * @where:	Offset within config space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759)  * @size:	Size of the transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760)  * @val:	The data being transferred
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 				      int size, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	if (where >= PCI_SUBSYSTEM_VENDOR_ID &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	    where + size <= PCI_CAPABILITY_LIST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		/* SSIDs and ROM BARs are read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	} else if (where >= PCI_COMMAND && where + size <= CFG_PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		/* Choose the function to be written. (See comment above) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 		/* Make sure the function was chosen before we start writing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 		wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		/* Write to that function's config space. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 		switch (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 			writeb(val, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 			writew(val, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 			writel(val, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		 * Make sure the write was done before we release the spinlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		 * allowing consecutive reads/writes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		dev_err(&hpdev->hbus->hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 			"Attempt to write beyond a function's config space.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802)  * hv_pcifront_read_config() - Read configuration space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803)  * @bus: PCI Bus structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804)  * @devfn: Device/function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805)  * @where: Offset from base
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806)  * @size: Byte/word/dword
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807)  * @val: Value to be read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809)  * Return: PCIBIOS_SUCCESSFUL on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810)  *	   PCIBIOS_DEVICE_NOT_FOUND on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) static int hv_pcifront_read_config(struct pci_bus *bus, unsigned int devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 				   int where, int size, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	struct hv_pcibus_device *hbus =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	struct hv_pci_dev *hpdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	if (!hpdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		return PCIBIOS_DEVICE_NOT_FOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	_hv_pcifront_read_config(hpdev, where, size, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	put_pcichild(hpdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	return PCIBIOS_SUCCESSFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830)  * hv_pcifront_write_config() - Write configuration space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831)  * @bus: PCI Bus structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832)  * @devfn: Device/function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833)  * @where: Offset from base
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834)  * @size: Byte/word/dword
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835)  * @val: Value to be written to device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837)  * Return: PCIBIOS_SUCCESSFUL on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838)  *	   PCIBIOS_DEVICE_NOT_FOUND on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) static int hv_pcifront_write_config(struct pci_bus *bus, unsigned int devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 				    int where, int size, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	struct hv_pcibus_device *hbus =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	    container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	struct hv_pci_dev *hpdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	if (!hpdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		return PCIBIOS_DEVICE_NOT_FOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	_hv_pcifront_write_config(hpdev, where, size, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	put_pcichild(hpdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	return PCIBIOS_SUCCESSFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) /* PCIe operations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) static struct pci_ops hv_pcifront_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	.read  = hv_pcifront_read_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	.write = hv_pcifront_write_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864)  * Paravirtual backchannel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866)  * Hyper-V SR-IOV provides a backchannel mechanism in software for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867)  * communication between a VF driver and a PF driver.  These
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868)  * "configuration blocks" are similar in concept to PCI configuration space,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869)  * but instead of doing reads and writes in 32-bit chunks through a very slow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870)  * path, packets of up to 128 bytes can be sent or received asynchronously.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872)  * Nearly every SR-IOV device contains just such a communications channel in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873)  * hardware, so using this one in software is usually optional.  Using the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874)  * software channel, however, allows driver implementers to leverage software
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875)  * tools that fuzz the communications channel looking for vulnerabilities.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877)  * The usage model for these packets puts the responsibility for reading or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878)  * writing on the VF driver.  The VF driver sends a read or a write packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879)  * indicating which "block" is being referred to by number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881)  * If the PF driver wishes to initiate communication, it can "invalidate" one or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882)  * more of the first 64 blocks.  This invalidation is delivered via a callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883)  * supplied by the VF driver by this driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885)  * No protocol is implied, except that supplied by the PF and VF drivers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) struct hv_read_config_compl {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	struct hv_pci_compl comp_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	void *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	unsigned int bytes_returned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896)  * hv_pci_read_config_compl() - Invoked when a response packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897)  * for a read config block operation arrives.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898)  * @context:		Identifies the read config operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899)  * @resp:		The response packet itself
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900)  * @resp_packet_size:	Size in bytes of the response packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) static void hv_pci_read_config_compl(void *context, struct pci_response *resp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 				     int resp_packet_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	struct hv_read_config_compl *comp = context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	struct pci_read_block_response *read_resp =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		(struct pci_read_block_response *)resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	unsigned int data_len, hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	hdr_len = offsetof(struct pci_read_block_response, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	if (resp_packet_size < hdr_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		comp->comp_pkt.completion_status = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	data_len = resp_packet_size - hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	if (data_len > 0 && read_resp->status == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		comp->bytes_returned = min(comp->len, data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		memcpy(comp->buf, read_resp->bytes, comp->bytes_returned);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		comp->bytes_returned = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	comp->comp_pkt.completion_status = read_resp->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	complete(&comp->comp_pkt.host_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930)  * hv_read_config_block() - Sends a read config block request to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931)  * the back-end driver running in the Hyper-V parent partition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932)  * @pdev:		The PCI driver's representation for this device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933)  * @buf:		Buffer into which the config block will be copied.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934)  * @len:		Size in bytes of buf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935)  * @block_id:		Identifies the config block which has been requested.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936)  * @bytes_returned:	Size which came back from the back-end driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938)  * Return: 0 on success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) static int hv_read_config_block(struct pci_dev *pdev, void *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 				unsigned int len, unsigned int block_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 				unsigned int *bytes_returned)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	struct hv_pcibus_device *hbus =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 			     sysdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		struct pci_packet pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		char buf[sizeof(struct pci_read_block)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	} pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	struct hv_read_config_compl comp_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	struct pci_read_block *read_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	init_completion(&comp_pkt.comp_pkt.host_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	comp_pkt.buf = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	comp_pkt.len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	memset(&pkt, 0, sizeof(pkt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	pkt.pkt.completion_func = hv_pci_read_config_compl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	pkt.pkt.compl_ctxt = &comp_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	read_blk = (struct pci_read_block *)&pkt.pkt.message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	read_blk->message_type.type = PCI_READ_BLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	read_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	read_blk->block_id = block_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	read_blk->bytes_requested = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	ret = vmbus_sendpacket(hbus->hdev->channel, read_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 			       sizeof(*read_blk), (unsigned long)&pkt.pkt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 			       VM_PKT_DATA_INBAND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	ret = wait_for_response(hbus->hdev, &comp_pkt.comp_pkt.host_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	if (comp_pkt.comp_pkt.completion_status != 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	    comp_pkt.bytes_returned == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		dev_err(&hbus->hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 			"Read Config Block failed: 0x%x, bytes_returned=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 			comp_pkt.comp_pkt.completion_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 			comp_pkt.bytes_returned);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	*bytes_returned = comp_pkt.bytes_returned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996)  * hv_pci_write_config_compl() - Invoked when a response packet for a write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997)  * config block operation arrives.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998)  * @context:		Identifies the write config operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999)  * @resp:		The response packet itself
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)  * @resp_packet_size:	Size in bytes of the response packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) static void hv_pci_write_config_compl(void *context, struct pci_response *resp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 				      int resp_packet_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	struct hv_pci_compl *comp_pkt = context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	comp_pkt->completion_status = resp->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	complete(&comp_pkt->host_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)  * hv_write_config_block() - Sends a write config block request to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)  * back-end driver running in the Hyper-V parent partition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)  * @pdev:		The PCI driver's representation for this device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)  * @buf:		Buffer from which the config block will	be copied.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)  * @len:		Size in bytes of buf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)  * @block_id:		Identifies the config block which is being written.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)  * Return: 0 on success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) static int hv_write_config_block(struct pci_dev *pdev, void *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 				unsigned int len, unsigned int block_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	struct hv_pcibus_device *hbus =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 			     sysdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		struct pci_packet pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		char buf[sizeof(struct pci_write_block)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		u32 reserved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	} pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	struct hv_pci_compl comp_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	struct pci_write_block *write_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	u32 pkt_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	init_completion(&comp_pkt.host_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	memset(&pkt, 0, sizeof(pkt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	pkt.pkt.completion_func = hv_pci_write_config_compl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	pkt.pkt.compl_ctxt = &comp_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	write_blk = (struct pci_write_block *)&pkt.pkt.message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	write_blk->message_type.type = PCI_WRITE_BLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	write_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	write_blk->block_id = block_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	write_blk->byte_count = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	memcpy(write_blk->bytes, buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	pkt_size = offsetof(struct pci_write_block, bytes) + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	 * This quirk is required on some hosts shipped around 2018, because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	 * these hosts don't check the pkt_size correctly (new hosts have been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	 * fixed since early 2019). The quirk is also safe on very old hosts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	 * and new hosts, because, on them, what really matters is the length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	 * specified in write_blk->byte_count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	pkt_size += sizeof(pkt.reserved);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	ret = vmbus_sendpacket(hbus->hdev->channel, write_blk, pkt_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 			       (unsigned long)&pkt.pkt, VM_PKT_DATA_INBAND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	ret = wait_for_response(hbus->hdev, &comp_pkt.host_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	if (comp_pkt.completion_status != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		dev_err(&hbus->hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 			"Write Config Block failed: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 			comp_pkt.completion_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)  * hv_register_block_invalidate() - Invoked when a config block invalidation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)  * arrives from the back-end driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)  * @pdev:		The PCI driver's representation for this device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)  * @context:		Identifies the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)  * @block_invalidate:	Identifies all of the blocks being invalidated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)  * Return: 0 on success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) static int hv_register_block_invalidate(struct pci_dev *pdev, void *context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 					void (*block_invalidate)(void *context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 								 u64 block_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	struct hv_pcibus_device *hbus =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 			     sysdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	struct hv_pci_dev *hpdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	if (!hpdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	hpdev->block_invalidate = block_invalidate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	hpdev->invalidate_context = context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	put_pcichild(hpdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) /* Interrupt management hooks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) static void hv_int_desc_free(struct hv_pci_dev *hpdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 			     struct tran_int_desc *int_desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	struct pci_delete_interrupt *int_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		struct pci_packet pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		u8 buffer[sizeof(struct pci_delete_interrupt)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	} ctxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	memset(&ctxt, 0, sizeof(ctxt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	int_pkt->message_type.type =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		PCI_DELETE_INTERRUPT_MESSAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	int_pkt->int_desc = *int_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, sizeof(*int_pkt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 			 (unsigned long)&ctxt.pkt, VM_PKT_DATA_INBAND, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	kfree(int_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)  * hv_msi_free() - Free the MSI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)  * @domain:	The interrupt domain pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)  * @info:	Extra MSI-related context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)  * @irq:	Identifies the IRQ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)  * The Hyper-V parent partition and hypervisor are tracking the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)  * messages that are in use, keeping the interrupt redirection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)  * table up to date.  This callback sends a message that frees
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)  * the IRT entry and related tracking nonsense.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 			unsigned int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	struct hv_pcibus_device *hbus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	struct hv_pci_dev *hpdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	struct pci_dev *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	struct tran_int_desc *int_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	struct irq_data *irq_data = irq_domain_get_irq_data(domain, irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	struct msi_desc *msi = irq_data_get_msi_desc(irq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	pdev = msi_desc_to_pci_dev(msi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	hbus = info->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	int_desc = irq_data_get_irq_chip_data(irq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	if (!int_desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	irq_data->chip_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	if (!hpdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 		kfree(int_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	hv_int_desc_free(hpdev, int_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	put_pcichild(hpdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) static int hv_set_affinity(struct irq_data *data, const struct cpumask *dest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 			   bool force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	struct irq_data *parent = data->parent_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	return parent->chip->irq_set_affinity(parent, dest, force);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) static void hv_irq_mask(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	pci_msi_mask_irq(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)  * hv_irq_unmask() - "Unmask" the IRQ by setting its current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)  * affinity.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)  * @data:	Describes the IRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)  * Build new a destination for the MSI and make a hypercall to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)  * update the Interrupt Redirection Table. "Device Logical ID"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)  * is built out of this PCI bus's instance GUID and the function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)  * number of the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) static void hv_irq_unmask(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	struct irq_cfg *cfg = irqd_cfg(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	struct hv_retarget_device_interrupt *params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	struct hv_pcibus_device *hbus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	struct cpumask *dest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	cpumask_var_t tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	struct pci_bus *pbus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	struct pci_dev *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	u32 var_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	int cpu, nr_bank;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	u64 res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	dest = irq_data_get_effective_affinity_mask(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	pdev = msi_desc_to_pci_dev(msi_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	pbus = pdev->bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	params = &hbus->retarget_msi_interrupt_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	memset(params, 0, sizeof(*params));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	params->partition_id = HV_PARTITION_ID_SELF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	params->int_entry.source = 1; /* MSI(-X) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	hv_set_msi_entry_from_desc(&params->int_entry.msi_entry, msi_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 			   (hbus->hdev->dev_instance.b[4] << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 			   (hbus->hdev->dev_instance.b[7] << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 			   (hbus->hdev->dev_instance.b[6] & 0xf8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 			   PCI_FUNC(pdev->devfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	params->int_target.vector = cfg->vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	 * Honoring apic->irq_delivery_mode set to dest_Fixed by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	 * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	 * spurious interrupt storm. Not doing so does not seem to have a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	 * negative effect (yet?).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		 * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 		 * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 		 * with >64 VP support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		 * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		 * is not sufficient for this hypercall.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		params->int_target.flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 			HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 		if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 			res = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 			goto exit_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		cpumask_and(tmp, dest, cpu_online_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 		nr_bank = cpumask_to_vpset(&params->int_target.vp_set, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 		free_cpumask_var(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		if (nr_bank <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 			res = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 			goto exit_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 		 * var-sized hypercall, var-size starts after vp_mask (thus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 		 * vp_set.format does not count, but vp_set.valid_bank_mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 		 * does).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 		var_size = 1 + nr_bank;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		for_each_cpu_and(cpu, dest, cpu_online_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 			params->int_target.vp_mask |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 				(1ULL << hv_cpu_number_to_vp_number(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 			      params, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) exit_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	 * During hibernation, when a CPU is offlined, the kernel tries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	 * to move the interrupt to the remaining CPUs that haven't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	 * been offlined yet. In this case, the below hv_do_hypercall()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	 * always fails since the vmbus channel has been closed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	 * refer to cpu_disable_common() -> fixup_irqs() ->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	 * irq_migrate_all_off_this_cpu() -> migrate_one_irq().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	 * Suppress the error message for hibernation because the failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	 * during hibernation does not matter (at this time all the devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	 * have been frozen). Note: the correct affinity info is still updated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	 * into the irqdata data structure in migrate_one_irq() ->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	 * irq_do_set_affinity() -> hv_set_affinity(), so later when the VM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	 * resumes, hv_pci_restore_msi_state() is able to correctly restore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	 * the interrupt with the correct affinity.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	if (res && hbus->state != hv_pcibus_removing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 		dev_err(&hbus->hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 			"%s() failed: %#llx", __func__, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	pci_msi_unmask_irq(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) struct compose_comp_ctxt {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	struct hv_pci_compl comp_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	struct tran_int_desc int_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) static void hv_pci_compose_compl(void *context, struct pci_response *resp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 				 int resp_packet_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	struct compose_comp_ctxt *comp_pkt = context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	struct pci_create_int_response *int_resp =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		(struct pci_create_int_response *)resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	comp_pkt->comp_pkt.completion_status = resp->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	comp_pkt->int_desc = int_resp->int_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	complete(&comp_pkt->comp_pkt.host_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) static u32 hv_compose_msi_req_v1(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	struct pci_create_interrupt *int_pkt, struct cpumask *affinity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	u32 slot, u8 vector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	int_pkt->wslot.slot = slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	int_pkt->int_desc.vector = vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	int_pkt->int_desc.vector_count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	int_pkt->int_desc.delivery_mode = dest_Fixed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	 * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	 * hv_irq_unmask().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	return sizeof(*int_pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) static u32 hv_compose_msi_req_v2(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	u32 slot, u8 vector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	int_pkt->wslot.slot = slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	int_pkt->int_desc.vector = vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	int_pkt->int_desc.vector_count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	int_pkt->int_desc.delivery_mode = dest_Fixed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	 * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	 * by subsequent retarget in hv_irq_unmask().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	cpu = cpumask_first_and(affinity, cpu_online_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	int_pkt->int_desc.processor_array[0] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 		hv_cpu_number_to_vp_number(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	int_pkt->int_desc.processor_count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	return sizeof(*int_pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)  * hv_compose_msi_msg() - Supplies a valid MSI address/data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)  * @data:	Everything about this MSI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)  * @msg:	Buffer that is filled in by this function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)  * This function unpacks the IRQ looking for target CPU set, IDT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)  * vector and mode and sends a message to the parent partition
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)  * asking for a mapping for that tuple in this partition.  The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)  * response supplies a data value and address to which that data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)  * should be written to trigger that interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	struct irq_cfg *cfg = irqd_cfg(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	struct hv_pcibus_device *hbus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	struct vmbus_channel *channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	struct hv_pci_dev *hpdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	struct pci_bus *pbus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	struct pci_dev *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	struct cpumask *dest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	struct compose_comp_ctxt comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	struct tran_int_desc *int_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		struct pci_packet pci_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 			struct pci_create_interrupt v1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 			struct pci_create_interrupt2 v2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 		} int_pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	} __packed ctxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	u32 size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	dest = irq_data_get_effective_affinity_mask(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	pbus = pdev->bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	channel = hbus->hdev->channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	if (!hpdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 		goto return_null_message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	/* Free any previous message that might have already been composed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	if (data->chip_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 		int_desc = data->chip_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 		data->chip_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 		hv_int_desc_free(hpdev, int_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	if (!int_desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 		goto drop_reference;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	memset(&ctxt, 0, sizeof(ctxt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	init_completion(&comp.comp_pkt.host_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	ctxt.pci_pkt.compl_ctxt = &comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	switch (hbus->protocol_version) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	case PCI_PROTOCOL_VERSION_1_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 		size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 					dest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 					hpdev->desc.win_slot.slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 					cfg->vector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	case PCI_PROTOCOL_VERSION_1_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	case PCI_PROTOCOL_VERSION_1_3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 		size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 					dest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 					hpdev->desc.win_slot.slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 					cfg->vector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 		/* As we only negotiate protocol versions known to this driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 		 * this path should never hit. However, this is it not a hot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 		 * path so we print a message to aid future updates.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 		dev_err(&hbus->hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 			"Unexpected vPCI protocol, update driver.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 		goto free_int_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 			       size, (unsigned long)&ctxt.pci_pkt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 			       VM_PKT_DATA_INBAND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 		dev_err(&hbus->hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 			"Sending request for interrupt failed: 0x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 			comp.comp_pkt.completion_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 		goto free_int_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	 * Prevents hv_pci_onchannelcallback() from running concurrently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	 * in the tasklet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	tasklet_disable(&channel->callback_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	 * Since this function is called with IRQ locks held, can't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	 * do normal wait for completion; instead poll.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	while (!try_wait_for_completion(&comp.comp_pkt.host_event)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 		/* 0xFFFF means an invalid PCI VENDOR ID. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 		if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 			dev_err_once(&hbus->hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 				     "the device has gone\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 			goto enable_tasklet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 		 * Make sure that the ring buffer data structure doesn't get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		 * freed while we dereference the ring buffer pointer.  Test
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 		 * for the channel's onchannel_callback being NULL within a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		 * sched_lock critical section.  See also the inline comments
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 		 * in vmbus_reset_channel_cb().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 		spin_lock_irqsave(&channel->sched_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		if (unlikely(channel->onchannel_callback == NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 			spin_unlock_irqrestore(&channel->sched_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 			goto enable_tasklet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 		hv_pci_onchannelcallback(hbus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 		spin_unlock_irqrestore(&channel->sched_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 		if (hpdev->state == hv_pcichild_ejecting) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 			dev_err_once(&hbus->hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 				     "the device is being ejected\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 			goto enable_tasklet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 		udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	tasklet_enable(&channel->callback_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	if (comp.comp_pkt.completion_status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 		dev_err(&hbus->hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 			"Request for interrupt failed: 0x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 			comp.comp_pkt.completion_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 		goto free_int_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	 * Record the assignment so that this can be unwound later. Using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	 * irq_set_chip_data() here would be appropriate, but the lock it takes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	 * is already held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	*int_desc = comp.int_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	data->chip_data = int_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	/* Pass up the result. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	msg->address_hi = comp.int_desc.address >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	msg->address_lo = comp.int_desc.address & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	msg->data = comp.int_desc.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	put_pcichild(hpdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) enable_tasklet:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	tasklet_enable(&channel->callback_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) free_int_desc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	kfree(int_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) drop_reference:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	put_pcichild(hpdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) return_null_message:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	msg->address_hi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	msg->address_lo = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	msg->data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) /* HW Interrupt Chip Descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) static struct irq_chip hv_msi_irq_chip = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	.name			= "Hyper-V PCIe MSI",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	.irq_compose_msi_msg	= hv_compose_msi_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	.irq_set_affinity	= hv_set_affinity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	.irq_ack		= irq_chip_ack_parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	.irq_mask		= hv_irq_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	.irq_unmask		= hv_irq_unmask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) static struct msi_domain_ops hv_msi_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	.msi_prepare	= pci_msi_prepare,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	.msi_free	= hv_msi_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)  * hv_pcie_init_irq_domain() - Initialize IRQ domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)  * @hbus:	The root PCI bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)  * This function creates an IRQ domain which will be used for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)  * interrupts from devices that have been passed through.  These
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)  * devices only support MSI and MSI-X, not line-based interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)  * or simulations of line-based interrupts through PCIe's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)  * fabric-layer messages.  Because interrupts are remapped, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)  * can support multi-message MSI here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)  * Return: '0' on success and error value on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	hbus->msi_info.chip = &hv_msi_irq_chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	hbus->msi_info.ops = &hv_msi_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 		MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 		MSI_FLAG_PCI_MSIX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	hbus->msi_info.handler = handle_edge_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	hbus->msi_info.handler_name = "edge";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 	hbus->msi_info.data = hbus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	hbus->irq_domain = pci_msi_create_irq_domain(hbus->sysdata.fwnode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 						     &hbus->msi_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 						     x86_vector_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	if (!hbus->irq_domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 		dev_err(&hbus->hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 			"Failed to build an MSI IRQ domain\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)  * get_bar_size() - Get the address space consumed by a BAR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)  * @bar_val:	Value that a BAR returned after -1 was written
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)  *              to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)  * This function returns the size of the BAR, rounded up to 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)  * page.  It has to be rounded up because the hypervisor's page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)  * table entry that maps the BAR into the VM can't specify an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)  * offset within a page.  The invariant is that the hypervisor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)  * must place any BARs of smaller than page length at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)  * beginning of a page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)  * Return:	Size in bytes of the consumed MMIO space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) static u64 get_bar_size(u64 bar_val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	return round_up((1 + ~(bar_val & PCI_BASE_ADDRESS_MEM_MASK)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 			PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)  * survey_child_resources() - Total all MMIO requirements
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)  * @hbus:	Root PCI bus, as understood by this driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) static void survey_child_resources(struct hv_pcibus_device *hbus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	struct hv_pci_dev *hpdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	resource_size_t bar_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	struct completion *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	u64 bar_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	/* If nobody is waiting on the answer, don't compute it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	event = xchg(&hbus->survey_event, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	if (!event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	/* If the answer has already been computed, go with it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	if (hbus->low_mmio_space || hbus->high_mmio_space) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 		complete(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	spin_lock_irqsave(&hbus->device_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	 * Due to an interesting quirk of the PCI spec, all memory regions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	 * for a child device are a power of 2 in size and aligned in memory,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	 * so it's sufficient to just add them up without tracking alignment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	list_for_each_entry(hpdev, &hbus->children, list_entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 			if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 				dev_err(&hbus->hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 					"There's an I/O BAR in this list!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 			if (hpdev->probed_bar[i] != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 				 * A probed BAR has all the upper bits set that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 				 * can be changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 				bar_val = hpdev->probed_bar[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 				if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 					bar_val |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 					((u64)hpdev->probed_bar[++i] << 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 					bar_val |= 0xffffffff00000000ULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 				bar_size = get_bar_size(bar_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 				if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 					hbus->high_mmio_space += bar_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 					hbus->low_mmio_space += bar_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	complete(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 
/**
 * prepopulate_bars() - Fill in BARs with defaults
 * @hbus:	Root PCI bus, as understood by this driver
 *
 * The core PCI driver code seems much, much happier if the BARs
 * for a device have values upon first scan. So fill them in.
 * The algorithm below works down from large sizes to small,
 * attempting to pack the assignments optimally. The assumption,
 * enforced in other parts of the code, is that the beginning of
 * the memory-mapped I/O space will be aligned on the largest
 * BAR size.
 */
static void prepopulate_bars(struct hv_pcibus_device *hbus)
{
	resource_size_t high_size = 0;
	resource_size_t low_size = 0;
	resource_size_t high_base = 0;
	resource_size_t low_base = 0;
	resource_size_t bar_size;
	struct hv_pci_dev *hpdev;
	unsigned long flags;
	u64 bar_val;
	u32 command;
	bool high;
	int i;

	/*
	 * Seed each region's pass size with the largest power of two that
	 * fits in the surveyed total (low = 32-bit MMIO, high = 64-bit
	 * MMIO), and start allocating from the region's base address.
	 */
	if (hbus->low_mmio_space) {
		low_size = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
		low_base = hbus->low_mmio_res->start;
	}

	if (hbus->high_mmio_space) {
		high_size = 1ULL <<
			(63 - __builtin_clzll(hbus->high_mmio_space));
		high_base = hbus->high_mmio_res->start;
	}

	spin_lock_irqsave(&hbus->device_list_lock, flags);

	/*
	 * Clear the memory enable bit, in case it's already set. This occurs
	 * in the suspend path of hibernation, where the device is suspended,
	 * resumed and suspended again: see hibernation_snapshot() and
	 * hibernation_platform_enter().
	 *
	 * If the memory enable bit is already set, Hyper-V silently ignores
	 * the below BAR updates, and the related PCI device driver can not
	 * work, because reading from the device register(s) always returns
	 * 0xFFFFFFFF.
	 */
	list_for_each_entry(hpdev, &hbus->children, list_entry) {
		_hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, &command);
		command &= ~PCI_COMMAND_MEMORY;
		_hv_pcifront_write_config(hpdev, PCI_COMMAND, 2, command);
	}

	/*
	 * Pick addresses for the BARs.  Each outer-loop pass handles one
	 * size, largest first; only BARs whose size equals the current pass
	 * size are assigned an address during that pass.
	 */
	do {
		list_for_each_entry(hpdev, &hbus->children, list_entry) {
			for (i = 0; i < PCI_STD_NUM_BARS; i++) {
				bar_val = hpdev->probed_bar[i];
				if (bar_val == 0)
					continue;	/* BAR not implemented */
				high = bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64;
				if (high) {
					/* 64-bit BAR: fold in the upper dword */
					bar_val |=
						((u64)hpdev->probed_bar[i + 1]
						 << 32);
				} else {
					bar_val |= 0xffffffffULL << 32;
				}
				bar_size = get_bar_size(bar_val);
				if (high) {
					if (high_size != bar_size) {
						/* skip the upper half too */
						i++;
						continue;
					}
					_hv_pcifront_write_config(hpdev,
						PCI_BASE_ADDRESS_0 + (4 * i),
						4,
						(u32)(high_base & 0xffffff00));
					i++;
					_hv_pcifront_write_config(hpdev,
						PCI_BASE_ADDRESS_0 + (4 * i),
						4, (u32)(high_base >> 32));
					high_base += bar_size;
				} else {
					if (low_size != bar_size)
						continue;
					_hv_pcifront_write_config(hpdev,
						PCI_BASE_ADDRESS_0 + (4 * i),
						4,
						(u32)(low_base & 0xffffff00));
					low_base += bar_size;
				}
			}
			/*
			 * When both pass sizes have shrunk to <= 1, every BAR
			 * size has been visited, so re-enable memory decoding.
			 * NOTE(review): the break exits the children loop
			 * after enabling the first device that reaches this
			 * point; typically there is only one child function
			 * per bus (see the header comment of this file) —
			 * confirm if multi-function buses matter here.
			 */
			if (high_size <= 1 && low_size <= 1) {
				/* Set the memory enable bit. */
				_hv_pcifront_read_config(hpdev, PCI_COMMAND, 2,
							 &command);
				command |= PCI_COMMAND_MEMORY;
				_hv_pcifront_write_config(hpdev, PCI_COMMAND, 2,
							  command);
				break;
			}
		}

		/* Halve the pass size for the next, smaller round. */
		high_size >>= 1;
		low_size >>= 1;
	}  while (high_size || low_size);

	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786)  * Assign entries in sysfs pci slot directory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)  * Note that this function does not need to lock the children list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)  * because it is called from pci_devices_present_work which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)  * is serialized with hv_eject_device_work because they are on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)  * same ordered workqueue. Therefore hbus->children list will not change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)  * even when pci_create_slot sleeps.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	struct hv_pci_dev *hpdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	char name[SLOT_NAME_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	int slot_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	list_for_each_entry(hpdev, &hbus->children, list_entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 		if (hpdev->pci_slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 		slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 		snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 		hpdev->pci_slot = pci_create_slot(hbus->pci_bus, slot_nr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 					  name, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 		if (IS_ERR(hpdev->pci_slot)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 			pr_warn("pci_create slot %s failed\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 			hpdev->pci_slot = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)  * Remove entries in sysfs pci slot directory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) static void hv_pci_remove_slots(struct hv_pcibus_device *hbus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	struct hv_pci_dev *hpdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	list_for_each_entry(hpdev, &hbus->children, list_entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 		if (!hpdev->pci_slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 		pci_destroy_slot(hpdev->pci_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		hpdev->pci_slot = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)  * Set NUMA node for the devices on the bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	struct pci_dev *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	struct pci_bus *bus = hbus->pci_bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	struct hv_pci_dev *hv_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	list_for_each_entry(dev, &bus->devices, bus_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 		hv_dev = get_pcichild_wslot(hbus, devfn_to_wslot(dev->devfn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 		if (!hv_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 		if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 		    hv_dev->desc.virtual_numa_node < num_possible_nodes())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 			 * The kernel may boot with some NUMA nodes offline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 			 * (e.g. in a KDUMP kernel) or with NUMA disabled via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 			 * "numa=off". In those cases, adjust the host provided
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 			 * NUMA node to a valid NUMA node used by the kernel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 			set_dev_node(&dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 				     numa_map_to_online_node(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 					     hv_dev->desc.virtual_numa_node));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 		put_pcichild(hv_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 
/**
 * create_root_hv_pci_bus() - Expose a new root PCI bus
 * @hbus:	Root PCI bus, as understood by this driver
 *
 * Creates the root bus at PCI bus number 0, scans it, assigns
 * resources and NUMA nodes, creates sysfs slot entries, and adds the
 * discovered devices to the PCI core.  On success the bus state is
 * set to hv_pcibus_installed.
 *
 * Return: 0 on success, -errno on failure
 */
static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
{
	/* Register the device */
	hbus->pci_bus = pci_create_root_bus(&hbus->hdev->device,
					    0, /* bus number is always zero */
					    &hv_pcifront_ops,
					    &hbus->sysdata,
					    &hbus->resources_for_children);
	if (!hbus->pci_bus)
		return -ENODEV;

	/* Route MSI handling for this bus through our MSI chip. */
	hbus->pci_bus->msi = &hbus->msi_chip;
	hbus->pci_bus->msi->dev = &hbus->hdev->device;

	/*
	 * Scan and set up the bus while holding the global PCI
	 * rescan/remove lock; the order of these calls matters.
	 */
	pci_lock_rescan_remove();
	pci_scan_child_bus(hbus->pci_bus);
	hv_pci_assign_numa_node(hbus);
	pci_bus_assign_resources(hbus->pci_bus);
	hv_pci_assign_slots(hbus);
	pci_bus_add_devices(hbus->pci_bus);
	pci_unlock_rescan_remove();
	hbus->state = hv_pcibus_installed;
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 
/*
 * Completion context for a Query Resource Requirements exchange with
 * the host; filled in by q_resource_requirements().
 */
struct q_res_req_compl {
	struct completion host_event;	/* signaled when the host's reply arrives */
	struct hv_pci_dev *hpdev;	/* device whose probed_bar[] gets filled in */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)  * q_resource_requirements() - Query Resource Requirements
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)  * @context:		The completion context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899)  * @resp:		The response that came from the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)  * @resp_packet_size:	The size in bytes of resp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)  * This function is invoked on completion of a Query Resource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903)  * Requirements packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) static void q_resource_requirements(void *context, struct pci_response *resp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 				    int resp_packet_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	struct q_res_req_compl *completion = context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	struct pci_q_res_req_response *q_res_req =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 		(struct pci_q_res_req_response *)resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	if (resp->status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 		dev_err(&completion->hpdev->hbus->hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 			"query resource requirements failed: %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 			resp->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 			completion->hpdev->probed_bar[i] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 				q_res_req->probed_bar[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 	complete(&completion->host_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 
/**
 * new_pcichild_device() - Create a new child device
 * @hbus:	The internal struct tracking this root PCI bus.
 * @desc:	The information supplied so far from the host
 *              about the device.
 *
 * This function creates the tracking structure for a new child
 * device and kicks off the process of figuring out what it is.
 * It sends a Query Resource Requirements message to the host and
 * blocks until the reply (handled by q_resource_requirements())
 * has populated hpdev->probed_bar[].
 *
 * Return: Pointer to the new tracking struct, or NULL on allocation
 *         or host-communication failure.
 */
static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
		struct hv_pcidev_description *desc)
{
	struct hv_pci_dev *hpdev;
	struct pci_child_message *res_req;
	struct q_res_req_compl comp_pkt;
	struct {
		struct pci_packet init_packet;
		u8 buffer[sizeof(struct pci_child_message)];
	} pkt;
	unsigned long flags;
	int ret;

	hpdev = kzalloc(sizeof(*hpdev), GFP_KERNEL);
	if (!hpdev)
		return NULL;

	hpdev->hbus = hbus;

	/*
	 * Build the Query Resource Requirements packet.  comp_pkt lives on
	 * this stack frame; the completion callback runs before we return
	 * because wait_for_response() below blocks until host_event fires.
	 */
	memset(&pkt, 0, sizeof(pkt));
	init_completion(&comp_pkt.host_event);
	comp_pkt.hpdev = hpdev;
	pkt.init_packet.compl_ctxt = &comp_pkt;
	pkt.init_packet.completion_func = q_resource_requirements;
	res_req = (struct pci_child_message *)&pkt.init_packet.message;
	res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS;
	res_req->wslot.slot = desc->win_slot.slot;

	ret = vmbus_sendpacket(hbus->hdev->channel, res_req,
			       sizeof(struct pci_child_message),
			       (unsigned long)&pkt.init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		goto error;

	if (wait_for_response(hbus->hdev, &comp_pkt.host_event))
		goto error;

	hpdev->desc = *desc;
	/* One reference returned to the caller, one for hbus->children. */
	refcount_set(&hpdev->refs, 1);
	get_pcichild(hpdev);
	spin_lock_irqsave(&hbus->device_list_lock, flags);

	list_add_tail(&hpdev->list_entry, &hbus->children);
	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
	return hpdev;

error:
	kfree(hpdev);
	return NULL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)  * get_pcichild_wslot() - Find device from slot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993)  * @hbus:	Root PCI bus, as understood by this driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)  * @wslot:	Location on the bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)  * This function looks up a PCI device and returns the internal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997)  * representation of it.  It acquires a reference on it, so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998)  * the device won't be deleted while somebody is using it.  The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)  * caller is responsible for calling put_pcichild() to release
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000)  * this reference.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)  * Return:	Internal representation of a PCI device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 					     u32 wslot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	struct hv_pci_dev *iter, *hpdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	spin_lock_irqsave(&hbus->device_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	list_for_each_entry(iter, &hbus->children, list_entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 		if (iter->desc.win_slot.slot == wslot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 			hpdev = iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 			get_pcichild(hpdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 	return hpdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024)  * pci_devices_present_work() - Handle new list of child devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025)  * @work:	Work struct embedded in struct hv_dr_work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)  * "Bus Relations" is the Windows term for "children of this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028)  * bus."  The terminology is preserved here for people trying to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029)  * debug the interaction between Hyper-V and Linux.  This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)  * function is called when the parent partition reports a list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031)  * of functions that should be observed under this PCI Express
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032)  * port (bus).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)  * This function updates the list, and must tolerate being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)  * called multiple times with the same information.  The typical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)  * number of child devices is one, with very atypical cases
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)  * involving three or four, so the algorithms used here can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)  * simple and inefficient.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040)  * It must also treat the omission of a previously observed device as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041)  * notification that the device no longer exists.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)  * Note that this function is serialized with hv_eject_device_work(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)  * because both are pushed to the ordered workqueue hbus->wq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) static void pci_devices_present_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	u32 child_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 	bool found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	struct hv_pcidev_description *new_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	struct hv_pci_dev *hpdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 	struct hv_pcibus_device *hbus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	struct list_head removed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 	struct hv_dr_work *dr_wrk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	struct hv_dr_state *dr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	dr_wrk = container_of(work, struct hv_dr_work, wrk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	hbus = dr_wrk->bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 	kfree(dr_wrk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	INIT_LIST_HEAD(&removed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	/* Pull this off the queue and process it if it was the last one. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	spin_lock_irqsave(&hbus->device_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 	while (!list_empty(&hbus->dr_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 		dr = list_first_entry(&hbus->dr_list, struct hv_dr_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 				      list_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 		list_del(&dr->list_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 		/* Throw this away if the list still has stuff in it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 		if (!list_empty(&hbus->dr_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 			kfree(dr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	if (!dr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 		put_hvpcibus(hbus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	/* First, mark all existing children as reported missing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	spin_lock_irqsave(&hbus->device_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	list_for_each_entry(hpdev, &hbus->children, list_entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 		hpdev->reported_missing = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	/* Next, add back any reported devices. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	for (child_no = 0; child_no < dr->device_count; child_no++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 		found = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 		new_desc = &dr->func[child_no];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 		spin_lock_irqsave(&hbus->device_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 		list_for_each_entry(hpdev, &hbus->children, list_entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 			if ((hpdev->desc.win_slot.slot == new_desc->win_slot.slot) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 			    (hpdev->desc.v_id == new_desc->v_id) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 			    (hpdev->desc.d_id == new_desc->d_id) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 			    (hpdev->desc.ser == new_desc->ser)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 				hpdev->reported_missing = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 				found = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 		spin_unlock_irqrestore(&hbus->device_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 		if (!found) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 			hpdev = new_pcichild_device(hbus, new_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 			if (!hpdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 				dev_err(&hbus->hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 					"couldn't record a child device.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	/* Move missing children to a list on the stack. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	spin_lock_irqsave(&hbus->device_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 		found = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 		list_for_each_entry(hpdev, &hbus->children, list_entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 			if (hpdev->reported_missing) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 				found = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 				put_pcichild(hpdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 				list_move_tail(&hpdev->list_entry, &removed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	} while (found);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	/* Delete everything that should no longer exist. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 	while (!list_empty(&removed)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 		hpdev = list_first_entry(&removed, struct hv_pci_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 					 list_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 		list_del(&hpdev->list_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 		if (hpdev->pci_slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 			pci_destroy_slot(hpdev->pci_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 		put_pcichild(hpdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	switch (hbus->state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	case hv_pcibus_installed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 		 * Tell the core to rescan bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 		 * because there may have been changes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 		pci_lock_rescan_remove();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 		pci_scan_child_bus(hbus->pci_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 		hv_pci_assign_numa_node(hbus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 		hv_pci_assign_slots(hbus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 		pci_unlock_rescan_remove();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 	case hv_pcibus_init:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	case hv_pcibus_probed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 		survey_child_resources(hbus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	put_hvpcibus(hbus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 	kfree(dr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170)  * hv_pci_start_relations_work() - Queue work to start device discovery
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)  * @hbus:	Root PCI bus, as understood by this driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172)  * @dr:		The list of children returned from host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)  * Return:  0 on success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) static int hv_pci_start_relations_work(struct hv_pcibus_device *hbus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 				       struct hv_dr_state *dr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 	struct hv_dr_work *dr_wrk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	bool pending_dr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 	if (hbus->state == hv_pcibus_removing) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 		dev_info(&hbus->hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 			 "PCI VMBus BUS_RELATIONS: ignored\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 	dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 	if (!dr_wrk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 	INIT_WORK(&dr_wrk->wrk, pci_devices_present_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 	dr_wrk->bus = hbus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 	spin_lock_irqsave(&hbus->device_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 	 * If pending_dr is true, we have already queued a work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 	 * which will see the new dr. Otherwise, we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 	 * queue a new work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 	pending_dr = !list_empty(&hbus->dr_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	list_add_tail(&dr->list_entry, &hbus->dr_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 	if (pending_dr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 		kfree(dr_wrk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 		get_hvpcibus(hbus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 		queue_work(hbus->wq, &dr_wrk->wrk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217)  * hv_pci_devices_present() - Handle list of new children
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218)  * @hbus:      Root PCI bus, as understood by this driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219)  * @relations: Packet from host listing children
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221)  * Process a new list of devices on the bus. The list of devices is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222)  * discovered by VSP and sent to us via VSP message PCI_BUS_RELATIONS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223)  * whenever a new list of devices for this bus appears.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 				   struct pci_bus_relations *relations)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 	struct hv_dr_state *dr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 	dr = kzalloc(struct_size(dr, func, relations->device_count),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 		     GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	if (!dr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 	dr->device_count = relations->device_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 	for (i = 0; i < dr->device_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 		dr->func[i].v_id = relations->func[i].v_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 		dr->func[i].d_id = relations->func[i].d_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 		dr->func[i].rev = relations->func[i].rev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 		dr->func[i].prog_intf = relations->func[i].prog_intf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 		dr->func[i].subclass = relations->func[i].subclass;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 		dr->func[i].base_class = relations->func[i].base_class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 		dr->func[i].subsystem_id = relations->func[i].subsystem_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 		dr->func[i].win_slot = relations->func[i].win_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 		dr->func[i].ser = relations->func[i].ser;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 	if (hv_pci_start_relations_work(hbus, dr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 		kfree(dr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254)  * hv_pci_devices_present2() - Handle list of new children
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255)  * @hbus:	Root PCI bus, as understood by this driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256)  * @relations:	Packet from host listing children
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)  * This function is the v2 version of hv_pci_devices_present()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) static void hv_pci_devices_present2(struct hv_pcibus_device *hbus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 				    struct pci_bus_relations2 *relations)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 	struct hv_dr_state *dr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 	dr = kzalloc(struct_size(dr, func, relations->device_count),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 		     GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 	if (!dr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 	dr->device_count = relations->device_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 	for (i = 0; i < dr->device_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 		dr->func[i].v_id = relations->func[i].v_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 		dr->func[i].d_id = relations->func[i].d_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 		dr->func[i].rev = relations->func[i].rev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 		dr->func[i].prog_intf = relations->func[i].prog_intf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 		dr->func[i].subclass = relations->func[i].subclass;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 		dr->func[i].base_class = relations->func[i].base_class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 		dr->func[i].subsystem_id = relations->func[i].subsystem_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 		dr->func[i].win_slot = relations->func[i].win_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 		dr->func[i].ser = relations->func[i].ser;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 		dr->func[i].flags = relations->func[i].flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 		dr->func[i].virtual_numa_node =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 			relations->func[i].virtual_numa_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 	if (hv_pci_start_relations_work(hbus, dr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 		kfree(dr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292)  * hv_eject_device_work() - Asynchronously handles ejection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293)  * @work:	Work struct embedded in internal device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295)  * This function handles ejecting a device.  Windows will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296)  * attempt to gracefully eject a device, waiting 60 seconds to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297)  * hear back from the guest OS that this completed successfully.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298)  * If this timer expires, the device will be forcibly removed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) static void hv_eject_device_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 	struct pci_eject_response *ejct_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 	struct hv_pcibus_device *hbus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 	struct hv_pci_dev *hpdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 	struct pci_dev *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 	int wslot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 	struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 		struct pci_packet pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 		u8 buffer[sizeof(struct pci_eject_response)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 	} ctxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	hpdev = container_of(work, struct hv_pci_dev, wrk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 	hbus = hpdev->hbus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 	WARN_ON(hpdev->state != hv_pcichild_ejecting);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 	 * Ejection can come before or after the PCI bus has been set up, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	 * attempt to find it and tear down the bus state, if it exists.  This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 	 * must be done without constructs like pci_domain_nr(hbus->pci_bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	 * because hbus->pci_bus may not exist yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 	wslot = wslot_to_devfn(hpdev->desc.win_slot.slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 	pdev = pci_get_domain_bus_and_slot(hbus->sysdata.domain, 0, wslot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 	if (pdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 		pci_lock_rescan_remove();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 		pci_stop_and_remove_bus_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 		pci_dev_put(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 		pci_unlock_rescan_remove();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 	spin_lock_irqsave(&hbus->device_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 	list_del(&hpdev->list_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 	if (hpdev->pci_slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 		pci_destroy_slot(hpdev->pci_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 	memset(&ctxt, 0, sizeof(ctxt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 	ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 	ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 	ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 	vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 			 sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 			 VM_PKT_DATA_INBAND, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 	/* For the get_pcichild() in hv_pci_eject_device() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 	put_pcichild(hpdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 	/* For the two refs got in new_pcichild_device() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 	put_pcichild(hpdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 	put_pcichild(hpdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 	/* hpdev has been freed. Do not use it any more. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	put_hvpcibus(hbus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359)  * hv_pci_eject_device() - Handles device ejection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360)  * @hpdev:	Internal device tracking struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362)  * This function is invoked when an ejection packet arrives.  It
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363)  * just schedules work so that we don't re-enter the packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364)  * delivery code handling the ejection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 	struct hv_pcibus_device *hbus = hpdev->hbus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 	struct hv_device *hdev = hbus->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 	if (hbus->state == hv_pcibus_removing) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 		dev_info(&hdev->device, "PCI VMBus EJECT: ignored\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 	hpdev->state = hv_pcichild_ejecting;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 	get_pcichild(hpdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 	INIT_WORK(&hpdev->wrk, hv_eject_device_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 	get_hvpcibus(hbus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 	queue_work(hbus->wq, &hpdev->wrk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384)  * hv_pci_onchannelcallback() - Handles incoming packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385)  * @context:	Internal bus tracking struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387)  * This function is invoked whenever the host sends a packet to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388)  * this channel (which is private to this root PCI bus).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) static void hv_pci_onchannelcallback(void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 	const int packet_size = 0x100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 	struct hv_pcibus_device *hbus = context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 	u32 bytes_recvd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 	u64 req_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 	struct vmpacket_descriptor *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 	unsigned char *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 	int bufferlen = packet_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 	struct pci_packet *comp_packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 	struct pci_response *response;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 	struct pci_incoming_message *new_message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 	struct pci_bus_relations *bus_rel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 	struct pci_bus_relations2 *bus_rel2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 	struct pci_dev_inval_block *inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 	struct pci_dev_incoming *dev_message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 	struct hv_pci_dev *hpdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 	buffer = kmalloc(bufferlen, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 	if (!buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 		ret = vmbus_recvpacket_raw(hbus->hdev->channel, buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 					   bufferlen, &bytes_recvd, &req_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 		if (ret == -ENOBUFS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 			kfree(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 			/* Handle large packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 			bufferlen = bytes_recvd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 			if (!buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 		/* Zero length indicates there are no more packets. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 		if (ret || !bytes_recvd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 		 * All incoming packets must be at least as large as a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 		 * response.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 		if (bytes_recvd <= sizeof(struct pci_response))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 		desc = (struct vmpacket_descriptor *)buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 		switch (desc->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 		case VM_PKT_COMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 			 * The host is trusted, and thus it's safe to interpret
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 			 * this transaction ID as a pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 			comp_packet = (struct pci_packet *)req_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 			response = (struct pci_response *)buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 			comp_packet->completion_func(comp_packet->compl_ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 						     response,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 						     bytes_recvd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 		case VM_PKT_DATA_INBAND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 			new_message = (struct pci_incoming_message *)buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 			switch (new_message->message_type.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 			case PCI_BUS_RELATIONS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 				bus_rel = (struct pci_bus_relations *)buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 				if (bytes_recvd <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 					struct_size(bus_rel, func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 						    bus_rel->device_count)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 					dev_err(&hbus->hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 						"bus relations too small\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 				hv_pci_devices_present(hbus, bus_rel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 			case PCI_BUS_RELATIONS2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 				bus_rel2 = (struct pci_bus_relations2 *)buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 				if (bytes_recvd <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 					struct_size(bus_rel2, func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 						    bus_rel2->device_count)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 					dev_err(&hbus->hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 						"bus relations v2 too small\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 				hv_pci_devices_present2(hbus, bus_rel2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 			case PCI_EJECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 				dev_message = (struct pci_dev_incoming *)buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 				hpdev = get_pcichild_wslot(hbus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 						      dev_message->wslot.slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 				if (hpdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 					hv_pci_eject_device(hpdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 					put_pcichild(hpdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 			case PCI_INVALIDATE_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 				inval = (struct pci_dev_inval_block *)buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 				hpdev = get_pcichild_wslot(hbus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 							   inval->wslot.slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 				if (hpdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 					if (hpdev->block_invalidate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 						hpdev->block_invalidate(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 						    hpdev->invalidate_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 						    inval->block_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 					}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 					put_pcichild(hpdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 			default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 				dev_warn(&hbus->hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 					"Unimplemented protocol message %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 					new_message->message_type.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 			dev_err(&hbus->hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 				"unhandled packet type %d, tid %llx len %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 				desc->type, req_id, bytes_recvd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 	kfree(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531)  * hv_pci_protocol_negotiation() - Set up protocol
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532)  * @hdev:		VMBus's tracking struct for this root PCI bus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533)  * @version:		Array of supported channel protocol versions in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534)  *			the order of probing - highest go first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535)  * @num_version:	Number of elements in the version array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537)  * This driver is intended to support running on Windows 10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538)  * (server) and later versions. It will not run on earlier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539)  * versions, as they assume that many of the operations which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540)  * Linux needs accomplished with a spinlock held were done via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541)  * asynchronous messaging via VMBus.  Windows 10 increases the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542)  * surface area of PCI emulation so that these actions can take
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543)  * place by suspending a virtual processor for their duration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545)  * This function negotiates the channel protocol version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546)  * failing if the host doesn't support the necessary protocol
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547)  * level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) static int hv_pci_protocol_negotiation(struct hv_device *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 				       enum pci_protocol_version_t version[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 				       int num_version)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 	struct pci_version_request *version_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 	struct hv_pci_compl comp_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 	struct pci_packet *pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 	 * Initiate the handshake with the host and negotiate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 	 * a version that the host can support. We start with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 	 * highest version number and go down if the host cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 	 * support it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 	pkt = kzalloc(sizeof(*pkt) + sizeof(*version_req), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 	if (!pkt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 	init_completion(&comp_pkt.host_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 	pkt->completion_func = hv_pci_generic_compl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 	pkt->compl_ctxt = &comp_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 	version_req = (struct pci_version_request *)&pkt->message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 	version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 	for (i = 0; i < num_version; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 		version_req->protocol_version = version[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 		ret = vmbus_sendpacket(hdev->channel, version_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 				sizeof(struct pci_version_request),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 				(unsigned long)pkt, VM_PKT_DATA_INBAND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 		if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 			ret = wait_for_response(hdev, &comp_pkt.host_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 			dev_err(&hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 				"PCI Pass-through VSP failed to request version: %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 				ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 			goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 		if (comp_pkt.completion_status >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 			hbus->protocol_version = version[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 			dev_info(&hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 				"PCI VMBus probing: Using version %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 				hbus->protocol_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 			goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 		if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 			dev_err(&hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 				"PCI Pass-through VSP failed version request: %#x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 				comp_pkt.completion_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 			ret = -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 			goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 		reinit_completion(&comp_pkt.host_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 	dev_err(&hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 		"PCI pass-through VSP failed to find supported version");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 	ret = -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 	kfree(pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621)  * hv_pci_free_bridge_windows() - Release memory regions for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622)  * bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623)  * @hbus:	Root PCI bus, as understood by this driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) static void hv_pci_free_bridge_windows(struct hv_pcibus_device *hbus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 	 * Set the resources back to the way they looked when they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 	 * were allocated by setting IORESOURCE_BUSY again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 	if (hbus->low_mmio_space && hbus->low_mmio_res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 		hbus->low_mmio_res->flags |= IORESOURCE_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 		vmbus_free_mmio(hbus->low_mmio_res->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 				resource_size(hbus->low_mmio_res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 	if (hbus->high_mmio_space && hbus->high_mmio_res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 		hbus->high_mmio_res->flags |= IORESOURCE_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 		vmbus_free_mmio(hbus->high_mmio_res->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 				resource_size(hbus->high_mmio_res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646)  * hv_pci_allocate_bridge_windows() - Allocate memory regions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647)  * for the bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648)  * @hbus:	Root PCI bus, as understood by this driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650)  * This function calls vmbus_allocate_mmio(), which is itself a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651)  * bit of a compromise.  Ideally, we might change the pnp layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652)  * in the kernel such that it comprehends either PCI devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653)  * which are "grandchildren of ACPI," with some intermediate bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654)  * node (in this case, VMBus) or change it such that it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655)  * understands VMBus.  The pnp layer, however, has been declared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656)  * deprecated, and not subject to change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658)  * The workaround, implemented here, is to ask VMBus to allocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659)  * MMIO space for this bus.  VMBus itself knows which ranges are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660)  * appropriate by looking at its own ACPI objects.  Then, after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661)  * these ranges are claimed, they're modified to look like they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662)  * would have looked if the ACPI and pnp code had allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663)  * bridge windows.  These descriptors have to exist in this form
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664)  * in order to satisfy the code which will get invoked when the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665)  * endpoint PCI function driver calls request_mem_region() or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666)  * request_mem_region_exclusive().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668)  * Return: 0 on success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 	resource_size_t align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 	if (hbus->low_mmio_space) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 		align = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 		ret = vmbus_allocate_mmio(&hbus->low_mmio_res, hbus->hdev, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 					  (u64)(u32)0xffffffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 					  hbus->low_mmio_space,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 					  align, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 			dev_err(&hbus->hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 				"Need %#llx of low MMIO space. Consider reconfiguring the VM.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 				hbus->low_mmio_space);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 		/* Modify this resource to become a bridge window. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 		hbus->low_mmio_res->flags |= IORESOURCE_WINDOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 		hbus->low_mmio_res->flags &= ~IORESOURCE_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 		pci_add_resource(&hbus->resources_for_children,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 				 hbus->low_mmio_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 	if (hbus->high_mmio_space) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 		align = 1ULL << (63 - __builtin_clzll(hbus->high_mmio_space));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 		ret = vmbus_allocate_mmio(&hbus->high_mmio_res, hbus->hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 					  0x100000000, -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 					  hbus->high_mmio_space, align,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 					  false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 			dev_err(&hbus->hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 				"Need %#llx of high MMIO space. Consider reconfiguring the VM.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 				hbus->high_mmio_space);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 			goto release_low_mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 		/* Modify this resource to become a bridge window. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 		hbus->high_mmio_res->flags |= IORESOURCE_WINDOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 		hbus->high_mmio_res->flags &= ~IORESOURCE_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 		pci_add_resource(&hbus->resources_for_children,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 				 hbus->high_mmio_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) release_low_mmio:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 	if (hbus->low_mmio_res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 		vmbus_free_mmio(hbus->low_mmio_res->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 				resource_size(hbus->low_mmio_res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727)  * hv_allocate_config_window() - Find MMIO space for PCI Config
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728)  * @hbus:	Root PCI bus, as understood by this driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730)  * This function claims memory-mapped I/O space for accessing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731)  * configuration space for the functions on this bus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)  * Return: 0 on success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) static int hv_allocate_config_window(struct hv_pcibus_device *hbus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 	 * Set up a region of MMIO space to use for accessing configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 	 * space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 	ret = vmbus_allocate_mmio(&hbus->mem_config, hbus->hdev, 0, -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 				  PCI_CONFIG_MMIO_LENGTH, 0x1000, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 	 * vmbus_allocate_mmio() gets used for allocating both device endpoint
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 	 * resource claims (those which cannot be overlapped) and the ranges
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 	 * which are valid for the children of this bus, which are intended
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 	 * to be overlapped by those children.  Set the flag on this claim
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 	 * meaning that this region can't be overlapped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 	hbus->mem_config->flags |= IORESOURCE_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) static void hv_free_config_window(struct hv_pcibus_device *hbus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 	vmbus_free_mmio(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769)  * hv_pci_enter_d0() - Bring the "bus" into the D0 power state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770)  * @hdev:	VMBus's tracking struct for this root PCI bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772)  * Return: 0 on success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) static int hv_pci_enter_d0(struct hv_device *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 	struct pci_bus_d0_entry *d0_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 	struct hv_pci_compl comp_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 	struct pci_packet *pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 	 * Tell the host that the bus is ready to use, and moved into the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 	 * powered-on state.  This includes telling the host which region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 	 * of memory-mapped I/O space has been chosen for configuration space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 	 * access.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 	pkt = kzalloc(sizeof(*pkt) + sizeof(*d0_entry), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 	if (!pkt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 	init_completion(&comp_pkt.host_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 	pkt->completion_func = hv_pci_generic_compl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 	pkt->compl_ctxt = &comp_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 	d0_entry = (struct pci_bus_d0_entry *)&pkt->message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 	d0_entry->message_type.type = PCI_BUS_D0ENTRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 	d0_entry->mmio_base = hbus->mem_config->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 	ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 			       (unsigned long)pkt, VM_PKT_DATA_INBAND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 		ret = wait_for_response(hdev, &comp_pkt.host_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 	if (comp_pkt.completion_status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 		dev_err(&hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 			"PCI Pass-through VSP failed D0 Entry with status %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 			comp_pkt.completion_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 		ret = -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 	kfree(pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824)  * hv_pci_query_relations() - Ask host to send list of child
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825)  * devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826)  * @hdev:	VMBus's tracking struct for this root PCI bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828)  * Return: 0 on success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) static int hv_pci_query_relations(struct hv_device *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 	struct pci_message message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 	struct completion comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 	/* Ask the host to send along the list of child devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 	init_completion(&comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 	if (cmpxchg(&hbus->survey_event, NULL, &comp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 		return -ENOTEMPTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 	memset(&message, 0, sizeof(message));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) 	message.type = PCI_QUERY_BUS_RELATIONS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 	ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 			       0, VM_PKT_DATA_INBAND, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 		ret = wait_for_response(hdev, &comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854)  * hv_send_resources_allocated() - Report local resource choices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855)  * @hdev:	VMBus's tracking struct for this root PCI bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857)  * The host OS is expecting to be sent a request as a message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858)  * which contains all the resources that the device will use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859)  * The response contains those same resources, "translated"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860)  * which is to say, the values which should be used by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861)  * hardware, when it delivers an interrupt.  (MMIO resources are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862)  * used in local terms.)  This is nice for Windows, and lines up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863)  * with the FDO/PDO split, which doesn't exist in Linux.  Linux
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864)  * is deeply expecting to scan an emulated PCI configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865)  * space.  So this message is sent here only to drive the state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866)  * machine on the host forward.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868)  * Return: 0 on success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) static int hv_send_resources_allocated(struct hv_device *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 	struct pci_resources_assigned *res_assigned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 	struct pci_resources_assigned2 *res_assigned2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 	struct hv_pci_compl comp_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 	struct hv_pci_dev *hpdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 	struct pci_packet *pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 	size_t size_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 	int wslot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 	size_res = (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 			? sizeof(*res_assigned) : sizeof(*res_assigned2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 	pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 	if (!pkt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 	for (wslot = 0; wslot < 256; wslot++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 		hpdev = get_pcichild_wslot(hbus, wslot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 		if (!hpdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 		memset(pkt, 0, sizeof(*pkt) + size_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 		init_completion(&comp_pkt.host_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 		pkt->completion_func = hv_pci_generic_compl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 		pkt->compl_ctxt = &comp_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 		if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 			res_assigned =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 				(struct pci_resources_assigned *)&pkt->message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 			res_assigned->message_type.type =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 				PCI_RESOURCES_ASSIGNED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 			res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 			res_assigned2 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 				(struct pci_resources_assigned2 *)&pkt->message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 			res_assigned2->message_type.type =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 				PCI_RESOURCES_ASSIGNED2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 			res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 		put_pcichild(hpdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 		ret = vmbus_sendpacket(hdev->channel, &pkt->message,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 				size_res, (unsigned long)pkt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 				VM_PKT_DATA_INBAND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 		if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 			ret = wait_for_response(hdev, &comp_pkt.host_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 		if (comp_pkt.completion_status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 			ret = -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 			dev_err(&hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 				"resource allocated returned 0x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 				comp_pkt.completion_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 		hbus->wslot_res_allocated = wslot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 	kfree(pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941)  * hv_send_resources_released() - Report local resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942)  * released
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943)  * @hdev:	VMBus's tracking struct for this root PCI bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945)  * Return: 0 on success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) static int hv_send_resources_released(struct hv_device *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 	struct pci_child_message pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 	struct hv_pci_dev *hpdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 	int wslot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 	for (wslot = hbus->wslot_res_allocated; wslot >= 0; wslot--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 		hpdev = get_pcichild_wslot(hbus, wslot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 		if (!hpdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 		memset(&pkt, 0, sizeof(pkt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 		pkt.message_type.type = PCI_RESOURCES_RELEASED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) 		pkt.wslot.slot = hpdev->desc.win_slot.slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 		put_pcichild(hpdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 		ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 				       VM_PKT_DATA_INBAND, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 		hbus->wslot_res_allocated = wslot - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 	hbus->wslot_res_allocated = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 
/* Take a reference on the bus that delays completion of remove_event. */
static void get_hvpcibus(struct hv_pcibus_device *hbus)
{
	refcount_inc(&hbus->remove_lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 
/* Drop a get_hvpcibus() reference; the last put signals remove_event. */
static void put_hvpcibus(struct hv_pcibus_device *hbus)
{
	if (refcount_dec_and_test(&hbus->remove_lock))
		complete(&hbus->remove_event);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 
#define HVPCI_DOM_MAP_SIZE (64 * 1024)
/* One bit per possible 16-bit PCI domain number; set = currently in use. */
static DECLARE_BITMAP(hvpci_dom_map, HVPCI_DOM_MAP_SIZE);

/*
 * PCI domain number 0 is used by emulated devices on Gen1 VMs, so define 0
 * as invalid for passthrough PCI devices of this driver.
 */
#define HVPCI_DOM_INVALID 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000)  * hv_get_dom_num() - Get a valid PCI domain number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001)  * Check if the PCI domain number is in use, and return another number if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002)  * it is in use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004)  * @dom: Requested domain number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006)  * return: domain number on success, HVPCI_DOM_INVALID on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) static u16 hv_get_dom_num(u16 dom)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) 	if (test_and_set_bit(dom, hvpci_dom_map) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) 		return dom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) 	for_each_clear_bit(i, hvpci_dom_map, HVPCI_DOM_MAP_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 		if (test_and_set_bit(i, hvpci_dom_map) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 			return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 	return HVPCI_DOM_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 
/**
 * hv_put_dom_num() - Mark the PCI domain number as free
 * @dom: Domain number to be freed
 */
static void hv_put_dom_num(u16 dom)
{
	/* Make the number available to hv_get_dom_num() again. */
	clear_bit(dom, hvpci_dom_map);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033)  * hv_pci_probe() - New VMBus channel probe, for a root PCI bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034)  * @hdev:	VMBus's tracking struct for this root PCI bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035)  * @dev_id:	Identifies the device itself
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037)  * Return: 0 on success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) static int hv_pci_probe(struct hv_device *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 			const struct hv_vmbus_device_id *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 	struct hv_pcibus_device *hbus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 	u16 dom_req, dom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 	char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) 	bool enter_d0_retry = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 	 * hv_pcibus_device contains the hypercall arguments for retargeting in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) 	 * hv_irq_unmask(). Those must not cross a page boundary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) 	BUILD_BUG_ON(sizeof(*hbus) > HV_HYP_PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) 	 * With the recent 59bb47985c1d ("mm, sl[aou]b: guarantee natural
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) 	 * alignment for kmalloc(power-of-two)"), kzalloc() is able to allocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) 	 * a 4KB buffer that is guaranteed to be 4KB-aligned. Here the size and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 	 * alignment of hbus is important because hbus's field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 	 * retarget_msi_interrupt_params must not cross a 4KB page boundary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) 	 * Here we prefer kzalloc to get_zeroed_page(), because a buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 	 * allocated by the latter is not tracked and scanned by kmemleak, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) 	 * hence kmemleak reports the pointer contained in the hbus buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 	 * (i.e. the hpdev struct, which is created in new_pcichild_device() and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) 	 * is tracked by hbus->children) as memory leak (false positive).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) 	 * If the kernel doesn't have 59bb47985c1d, get_zeroed_page() *must* be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) 	 * used to allocate the hbus buffer and we can avoid the kmemleak false
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) 	 * positive by using kmemleak_alloc() and kmemleak_free() to ask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) 	 * kmemleak to track and scan the hbus buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) 	hbus = kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 	if (!hbus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 	hbus->state = hv_pcibus_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 	hbus->wslot_res_allocated = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 	 * The PCI bus "domain" is what is called "segment" in ACPI and other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 	 * specs. Pull it from the instance ID, to get something usually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 	 * unique. In rare cases of collision, we will find out another number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 	 * not in use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 	 * Note that, since this code only runs in a Hyper-V VM, Hyper-V
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 	 * together with this guest driver can guarantee that (1) The only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 	 * domain used by Gen1 VMs for something that looks like a physical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 	 * PCI bus (which is actually emulated by the hypervisor) is domain 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 	 * (2) There will be no overlap between domains (after fixing possible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) 	 * collisions) in the same VM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) 	dom_req = hdev->dev_instance.b[5] << 8 | hdev->dev_instance.b[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) 	dom = hv_get_dom_num(dom_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) 	if (dom == HVPCI_DOM_INVALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) 		dev_err(&hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) 			"Unable to use dom# 0x%hx or other numbers", dom_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) 		goto free_bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) 	if (dom != dom_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) 		dev_info(&hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) 			 "PCI dom# 0x%hx has collision, using 0x%hx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) 			 dom_req, dom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) 	hbus->sysdata.domain = dom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 	hbus->hdev = hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 	refcount_set(&hbus->remove_lock, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 	INIT_LIST_HEAD(&hbus->children);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) 	INIT_LIST_HEAD(&hbus->dr_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 	INIT_LIST_HEAD(&hbus->resources_for_children);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 	spin_lock_init(&hbus->config_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) 	spin_lock_init(&hbus->device_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 	spin_lock_init(&hbus->retarget_msi_interrupt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) 	init_completion(&hbus->remove_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) 	hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 					   hbus->sysdata.domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) 	if (!hbus->wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) 		goto free_dom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) 			 hv_pci_onchannelcallback, hbus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) 		goto destroy_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) 	hv_set_drvdata(hdev, hbus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 	ret = hv_pci_protocol_negotiation(hdev, pci_protocol_versions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 					  ARRAY_SIZE(pci_protocol_versions));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) 		goto close;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 	ret = hv_allocate_config_window(hbus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 		goto close;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 	hbus->cfg_addr = ioremap(hbus->mem_config->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 				 PCI_CONFIG_MMIO_LENGTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 	if (!hbus->cfg_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 		dev_err(&hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 			"Unable to map a virtual address for config space\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 		goto free_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 	name = kasprintf(GFP_KERNEL, "%pUL", &hdev->dev_instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 	if (!name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 		goto unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 	hbus->sysdata.fwnode = irq_domain_alloc_named_fwnode(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 	kfree(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 	if (!hbus->sysdata.fwnode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 		goto unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 	ret = hv_pcie_init_irq_domain(hbus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 		goto free_fwnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 	ret = hv_pci_query_relations(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) 		goto free_irq_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) 	ret = hv_pci_enter_d0(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) 	 * In certain case (Kdump) the pci device of interest was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 	 * not cleanly shut down and resource is still held on host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 	 * side, the host could return invalid device status.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 	 * We need to explicitly request host to release the resource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 	 * and try to enter D0 again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 	 * Since the hv_pci_bus_exit() call releases structures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) 	 * of all its child devices, we need to start the retry from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 	 * hv_pci_query_relations() call, requesting host to send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 	 * the synchronous child device relations message before this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 	 * information is needed in hv_send_resources_allocated()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 	 * call later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 	if (ret == -EPROTO && enter_d0_retry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) 		enter_d0_retry = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) 		dev_err(&hdev->device, "Retrying D0 Entry\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) 		 * Hv_pci_bus_exit() calls hv_send_resources_released()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) 		 * to free up resources of its child devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) 		 * In the kdump kernel we need to set the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 		 * wslot_res_allocated to 255 so it scans all child
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) 		 * devices to release resources allocated in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 		 * normal kernel before panic happened.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) 		hbus->wslot_res_allocated = 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) 		ret = hv_pci_bus_exit(hdev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) 		if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) 			goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 		dev_err(&hdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 			"Retrying D0 failed with ret %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 		goto free_irq_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) 	ret = hv_pci_allocate_bridge_windows(hbus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) 		goto exit_d0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) 	ret = hv_send_resources_allocated(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) 		goto free_windows;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) 	prepopulate_bars(hbus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 	hbus->state = hv_pcibus_probed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 	ret = create_root_hv_pci_bus(hbus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 		goto free_windows;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) free_windows:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) 	hv_pci_free_bridge_windows(hbus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) exit_d0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 	(void) hv_pci_bus_exit(hdev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) free_irq_domain:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) 	irq_domain_remove(hbus->irq_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) free_fwnode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) 	irq_domain_free_fwnode(hbus->sysdata.fwnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) unmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) 	iounmap(hbus->cfg_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) free_config:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 	hv_free_config_window(hbus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) close:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) 	vmbus_close(hdev->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) destroy_wq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 	destroy_workqueue(hbus->wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) free_dom:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 	hv_put_dom_num(hbus->sysdata.domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) free_bus:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 	kfree(hbus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) 
/**
 * hv_pci_bus_exit() - Take the root bus out of D0
 * @hdev:	VMBus's tracking struct for this root PCI bus
 * @keep_devs:	if false, also destroy all child device structures
 *
 * Reports any still-allocated child resources back to the host via
 * hv_send_resources_released(), then sends the PCI_BUS_D0EXIT teardown
 * message and waits (up to 10 seconds) for the host's completion.
 *
 * Return: 0 on success, -errno on failure (-ETIMEDOUT if the host does
 * not complete the teardown packet in time).
 */
static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct {
		struct pci_packet teardown_packet;
		u8 buffer[sizeof(struct pci_message)];
	} pkt;
	struct hv_pci_compl comp_pkt;
	struct hv_pci_dev *hpdev, *tmp;
	unsigned long flags;
	int ret;

	/*
	 * After the host sends the RESCIND_CHANNEL message, it doesn't
	 * access the per-channel ringbuffer any longer.
	 */
	if (hdev->channel->rescind)
		return 0;

	if (!keep_devs) {
		struct list_head removed;

		/* Move all present children to the list on stack */
		INIT_LIST_HEAD(&removed);
		spin_lock_irqsave(&hbus->device_list_lock, flags);
		list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry)
			list_move_tail(&hpdev->list_entry, &removed);
		spin_unlock_irqrestore(&hbus->device_list_lock, flags);

		/* Remove all children in the list */
		list_for_each_entry_safe(hpdev, tmp, &removed, list_entry) {
			list_del(&hpdev->list_entry);
			if (hpdev->pci_slot)
				pci_destroy_slot(hpdev->pci_slot);
			/* For the two refs got in new_pcichild_device() */
			put_pcichild(hpdev);
			put_pcichild(hpdev);
		}
	}

	ret = hv_send_resources_released(hdev);
	if (ret) {
		dev_err(&hdev->device,
			"Couldn't send resources released packet(s)\n");
		return ret;
	}

	/* Build and send the D0EXIT message; completion arrives via
	 * hv_pci_generic_compl() signalling comp_pkt.host_event.
	 */
	memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
	init_completion(&comp_pkt.host_event);
	pkt.teardown_packet.completion_func = hv_pci_generic_compl;
	pkt.teardown_packet.compl_ctxt = &comp_pkt;
	pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT;

	ret = vmbus_sendpacket(hdev->channel, &pkt.teardown_packet.message,
			       sizeof(struct pci_message),
			       (unsigned long)&pkt.teardown_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

	if (wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ) == 0)
		return -ETIMEDOUT;

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) 
/**
 * hv_pci_remove() - Remove routine for this VMBus channel
 * @hdev:	VMBus's tracking struct for this root PCI bus
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_remove(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus;
	int ret;

	hbus = hv_get_drvdata(hdev);
	if (hbus->state == hv_pcibus_installed) {
		/*
		 * Flip the state under tasklet_disable() so the change cannot
		 * race with the channel callback running in tasklet context.
		 */
		tasklet_disable(&hdev->channel->callback_event);
		hbus->state = hv_pcibus_removing;
		tasklet_enable(&hdev->channel->callback_event);
		destroy_workqueue(hbus->wq);
		hbus->wq = NULL;
		/*
		 * At this point, no work is running or can be scheduled
		 * on hbus-wq. We can't race with hv_pci_devices_present()
		 * or hv_pci_eject_device(), it's safe to proceed.
		 */

		/* Remove the bus from PCI's point of view. */
		pci_lock_rescan_remove();
		pci_stop_root_bus(hbus->pci_bus);
		hv_pci_remove_slots(hbus);
		pci_remove_root_bus(hbus->pci_bus);
		pci_unlock_rescan_remove();
	}

	/* Exit D0 and destroy the child device structures (keep_devs=false). */
	ret = hv_pci_bus_exit(hdev, false);

	vmbus_close(hdev->channel);

	iounmap(hbus->cfg_addr);
	hv_free_config_window(hbus);
	pci_free_resource_list(&hbus->resources_for_children);
	hv_pci_free_bridge_windows(hbus);
	irq_domain_remove(hbus->irq_domain);
	irq_domain_free_fwnode(hbus->sysdata.fwnode);
	/* Wait for all outstanding get_hvpcibus() references to drop. */
	put_hvpcibus(hbus);
	wait_for_completion(&hbus->remove_event);

	hv_put_dom_num(hbus->sysdata.domain);

	kfree(hbus);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) 
/*
 * VMBus suspend callback: quiesce the bus (no new work items), flush the
 * workqueue, take the bus out of D0 while keeping the child device
 * structures (keep_devs = true), and close the VMBus channel.
 */
static int hv_pci_suspend(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	enum hv_pcibus_state old_state;
	int ret;

	/*
	 * hv_pci_suspend() must make sure there are no pending work items
	 * before calling vmbus_close(), since it runs in a process context
	 * as a callback in dpm_suspend().  When it starts to run, the channel
	 * callback hv_pci_onchannelcallback(), which runs in a tasklet
	 * context, can be still running concurrently and scheduling new work
	 * items onto hbus->wq in hv_pci_devices_present() and
	 * hv_pci_eject_device(), and the work item handlers can access the
	 * vmbus channel, which can be being closed by hv_pci_suspend(), e.g.
	 * the work item handler pci_devices_present_work() ->
	 * new_pcichild_device() writes to the vmbus channel.
	 *
	 * To eliminate the race, hv_pci_suspend() disables the channel
	 * callback tasklet, sets hbus->state to hv_pcibus_removing, and
	 * re-enables the tasklet. This way, when hv_pci_suspend() proceeds,
	 * it knows that no new work item can be scheduled, and then it flushes
	 * hbus->wq and safely closes the vmbus channel.
	 */
	tasklet_disable(&hdev->channel->callback_event);

	/* Change the hbus state to prevent new work items. */
	old_state = hbus->state;
	if (hbus->state == hv_pcibus_installed)
		hbus->state = hv_pcibus_removing;

	tasklet_enable(&hdev->channel->callback_event);

	/* Only a fully-installed bus can be suspended. */
	if (old_state != hv_pcibus_installed)
		return -EINVAL;

	flush_workqueue(hbus->wq);

	ret = hv_pci_bus_exit(hdev, true);
	if (ret)
		return ret;

	vmbus_close(hdev->channel);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) 	struct msi_desc *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) 	struct irq_data *irq_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) 	for_each_pci_msi_entry(entry, pdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) 		irq_data = irq_get_irq_data(entry->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) 		if (WARN_ON_ONCE(!irq_data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) 		hv_compose_msi_msg(irq_data, &entry->msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433)  * Upon resume, pci_restore_msi_state() -> ... ->  __pci_write_msi_msg()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434)  * directly writes the MSI/MSI-X registers via MMIO, but since Hyper-V
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435)  * doesn't trap and emulate the MMIO accesses, here hv_compose_msi_msg()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436)  * must be used to ask Hyper-V to re-create the IOMMU Interrupt Remapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437)  * Table entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) static void hv_pci_restore_msi_state(struct hv_pcibus_device *hbus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) 	pci_walk_bus(hbus->pci_bus, hv_pci_restore_msi_msg, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) static int hv_pci_resume(struct hv_device *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) 	enum pci_protocol_version_t version[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) 	hbus->state = hv_pcibus_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) 	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) 			 hv_pci_onchannelcallback, hbus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) 	/* Only use the version that was in use before hibernation. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) 	version[0] = hbus->protocol_version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) 	ret = hv_pci_protocol_negotiation(hdev, version, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) 	ret = hv_pci_query_relations(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) 	ret = hv_pci_enter_d0(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) 	ret = hv_send_resources_allocated(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) 	prepopulate_bars(hbus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) 	hv_pci_restore_msi_state(hbus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) 	hbus->state = hv_pcibus_installed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) 	vmbus_close(hdev->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) static const struct hv_vmbus_device_id hv_pci_id_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) 	/* PCI Pass-through Class ID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) 	/* 44C4F61D-4444-4400-9D52-802E27EDE19F */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) 	{ HV_PCIE_GUID, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) 	{ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) MODULE_DEVICE_TABLE(vmbus, hv_pci_id_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) static struct hv_driver hv_pci_drv = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) 	.name		= "hv_pci",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) 	.id_table	= hv_pci_id_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) 	.probe		= hv_pci_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) 	.remove		= hv_pci_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) 	.suspend	= hv_pci_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) 	.resume		= hv_pci_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) static void __exit exit_hv_pci_drv(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) 	vmbus_driver_unregister(&hv_pci_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) 	hvpci_block_ops.read_block = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) 	hvpci_block_ops.write_block = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) 	hvpci_block_ops.reg_blk_invalidate = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) static int __init init_hv_pci_drv(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) 	if (!hv_is_hyperv_initialized())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) 	/* Set the invalid domain number's bit, so it will not be used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) 	set_bit(HVPCI_DOM_INVALID, hvpci_dom_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) 	/* Initialize PCI block r/w interface */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) 	hvpci_block_ops.read_block = hv_read_config_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) 	hvpci_block_ops.write_block = hv_write_config_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) 	hvpci_block_ops.reg_blk_invalidate = hv_register_block_invalidate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) 	return vmbus_driver_register(&hv_pci_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) module_init(init_hv_pci_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) module_exit(exit_hv_pci_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) MODULE_DESCRIPTION("Hyper-V PCI");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) MODULE_LICENSE("GPL v2");