^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * PCI Backend Common Data Structures & Function Declarations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #ifndef __XEN_PCIBACK_H__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #define __XEN_PCIBACK_H__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <xen/xenbus.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/workqueue.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/atomic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <xen/events.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <xen/interface/io/pciif.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #define DRV_NAME "xen-pciback"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21)
/* One node in a backend's list of PCI devices exported to a frontend. */
struct pci_dev_entry {
	struct list_head list;	/* linkage into the owning list */
	struct pci_dev *dev;	/* the device this entry represents */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #define _PDEVF_op_active (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #define PDEVF_op_active (1<<(_PDEVF_op_active))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #define _PCIB_op_pending (1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #define PCIB_op_pending (1<<(_PCIB_op_pending))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #define _EOI_pending (2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #define EOI_pending (1<<(_EOI_pending))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33)
/*
 * Per-xenbus-device state for one pciback instance: xenbus handles, the
 * info page shared with the frontend, and the deferred-work context used
 * to service frontend requests.
 */
struct xen_pcibk_device {
	void *pci_dev_data;		/* opaque per-backend state (vpci/passthrough) -- see xen_pcibk_backend */
	struct mutex dev_lock;		/* serialization lock; NOTE(review): exact scope not visible here -- confirm at call sites */
	struct xenbus_device *xdev;	/* our xenbus device */
	struct xenbus_watch be_watch;	/* xenstore watch; presumably on the backend directory -- verify in xenbus code */
	u8 be_watching;			/* flag tracking whether be_watch is registered (assumed -- confirm) */
	int evtchn_irq;			/* IRQ bound to the frontend event channel; EOI'd via xen_irq_lateeoi() */
	struct xen_pci_sharedinfo *sh_info;	/* info page shared with the frontend */
	unsigned long flags;		/* bit field for _PDEVF_op_active/_PCIB_op_pending/_EOI_pending */
	struct work_struct op_work;	/* work item; presumably runs xen_pcibk_do_op() -- confirm where queued */
	struct xen_pci_op op;		/* operation currently being processed (assumed -- confirm) */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
/*
 * Per-pci_dev state kept by pciback; sized with a flexible array member,
 * so allocate with sizeof(*dev_data) + strlen(irq_name) + 1 (or similar).
 */
struct xen_pcibk_dev_data {
	struct list_head config_fields;	/* virtual config-space field overlays (see conf_space code) */
	struct pci_saved_state *pci_saved_state;	/* saved config state for later restore */
	unsigned int permissive:1;	/* allow otherwise-filtered config writes (assumed -- confirm) */
	unsigned int allow_interrupt_control:1;
	unsigned int warned_on_write:1;	/* rate-limits a write warning (assumed -- confirm) */
	unsigned int enable_intx:1;
	unsigned int isr_on:1; /* Whether the IRQ handler is installed. */
	unsigned int ack_intr:1; /* .. and ACK-ing */
	unsigned long handled;	/* counter of handled interrupts (assumed -- confirm) */
	unsigned int irq; /* Saved in case device transitions to MSI/MSI-X */
	char irq_name[]; /* xen-pcibk[000:04:00.0] */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) /* Used by XenBus and xen_pcibk_ops.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) extern wait_queue_head_t xen_pcibk_aer_wait_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) /* Used by pcistub.c and conf_space_quirks.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) extern struct list_head xen_pcibk_quirks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) /* Get/Put PCI Devices that are hidden from the PCI Backend Domain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) int domain, int bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) int slot, int func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) struct pci_dev *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) void pcistub_put_pci_dev(struct pci_dev *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) /* Ensure a device is turned off or reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) void xen_pcibk_reset_device(struct pci_dev *pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) /* Access a virtual configuration space for a PCI device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) int xen_pcibk_config_init(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) int xen_pcibk_config_init_dev(struct pci_dev *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) void xen_pcibk_config_free_dyn_fields(struct pci_dev *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) void xen_pcibk_config_reset_dev(struct pci_dev *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) void xen_pcibk_config_free_dev(struct pci_dev *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) u32 *ret_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) u32 value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) /* Handle requests for specific devices from the frontend */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) typedef int (*publish_pci_dev_cb) (struct xen_pcibk_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) unsigned int domain, unsigned int bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) unsigned int devfn, unsigned int devid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) typedef int (*publish_pci_root_cb) (struct xen_pcibk_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) unsigned int domain, unsigned int bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) /* Backend registration for the two types of BDF representation:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) * vpci - BDFs start at 00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) * passthrough - BDFs are exactly like in the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) */
/*
 * Ops table implemented by each BDF-representation backend (vpci and
 * passthrough).  All hooks are optional: the xen_pcibk_* inline wrappers
 * below check for NULL before dispatching.
 */
struct xen_pcibk_backend {
	const char *name;	/* backend identifier */
	int (*init)(struct xen_pcibk_device *pdev);	/* via xen_pcibk_init_devices() */
	void (*free)(struct xen_pcibk_device *pdev);	/* via xen_pcibk_release_devices() */
	int (*find)(struct pci_dev *pcidev, struct xen_pcibk_device *pdev,
		    unsigned int *domain, unsigned int *bus,
		    unsigned int *devfn);	/* via xen_pcibk_get_pcifront_dev(); fills guest-visible BDF */
	int (*publish)(struct xen_pcibk_device *pdev, publish_pci_root_cb cb);	/* via xen_pcibk_publish_pci_roots() */
	void (*release)(struct xen_pcibk_device *pdev, struct pci_dev *dev,
			bool lock);	/* via xen_pcibk_release_pci_dev() */
	int (*add)(struct xen_pcibk_device *pdev, struct pci_dev *dev,
		   int devid, publish_pci_dev_cb publish_cb);	/* via xen_pcibk_add_pci_dev() */
	struct pci_dev *(*get)(struct xen_pcibk_device *pdev,
			       unsigned int domain, unsigned int bus,
			       unsigned int devfn);	/* via xen_pcibk_get_pci_dev() */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) extern const struct xen_pcibk_backend xen_pcibk_vpci_backend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) extern const struct xen_pcibk_backend xen_pcibk_passthrough_backend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) extern const struct xen_pcibk_backend *xen_pcibk_backend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) static inline int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) struct pci_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) int devid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) publish_pci_dev_cb publish_cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) if (xen_pcibk_backend && xen_pcibk_backend->add)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) return xen_pcibk_backend->add(pdev, dev, devid, publish_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) static inline void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) struct pci_dev *dev, bool lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) if (xen_pcibk_backend && xen_pcibk_backend->release)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) return xen_pcibk_backend->release(pdev, dev, lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) static inline struct pci_dev *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev, unsigned int domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) unsigned int bus, unsigned int devfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) if (xen_pcibk_backend && xen_pcibk_backend->get)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) return xen_pcibk_backend->get(pdev, domain, bus, devfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145)
/*
 * Added for dom0 PCIe AER handling. Look up the guest domain/bus/devfn in
 * xen_pcibk before sending an AER request to pcifront, so that the guest can
 * identify the device and cooperate with xen_pcibk to finish the AER recovery
 * job, provided the device driver has the capability.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) static inline int xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) struct xen_pcibk_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) unsigned int *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) unsigned int *bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) unsigned int *devfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) if (xen_pcibk_backend && xen_pcibk_backend->find)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) return xen_pcibk_backend->find(pcidev, pdev, domain, bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) devfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) static inline int xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) if (xen_pcibk_backend && xen_pcibk_backend->init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) return xen_pcibk_backend->init(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) static inline int xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) publish_pci_root_cb cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) if (xen_pcibk_backend && xen_pcibk_backend->publish)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) return xen_pcibk_backend->publish(pdev, cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) static inline void xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) if (xen_pcibk_backend && xen_pcibk_backend->free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) return xen_pcibk_backend->free(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) /* Handles events from front-end */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) void xen_pcibk_do_op(struct work_struct *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) static inline void xen_pcibk_lateeoi(struct xen_pcibk_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) unsigned int eoi_flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) if (test_and_clear_bit(_EOI_pending, &pdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) xen_irq_lateeoi(pdev->evtchn_irq, eoi_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) int xen_pcibk_xenbus_register(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) void xen_pcibk_xenbus_unregister(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)
/* Handles shared IRQs that can belong to the device domain and the control domain. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) void xen_pcibk_irq_handler(struct pci_dev *dev, int reset);