/*
 * PCI Stub Driver - Grabs devices in backend to be exported later
 *
 * Ryan Wilson <hap9@epoch.ncsc.mil>
 * Chris Bookholt <hap10@epoch.ncsc.mil>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define dev_fmt pr_fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/rwsem.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/kref.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <xen/events.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypervisor.h>
#include <xen/interface/physdev.h>
#include "pciback.h"
#include "conf_space.h"
#include "conf_space_quirks.h"

#define PCISTUB_DRIVER_NAME "pciback"

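/*
 * PCI slots that pciback should hide from dom0 drivers and claim for
 * itself, from the "hide" module parameter (typically a list of
 * parenthesized slots on the kernel command line, e.g. something like
 * xen-pciback.hide=(0000:01:00.0)(0000:03:00.0)).
 */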
static char *pci_devs_to_hide;
wait_queue_head_t xen_pcibk_aer_wait_queue;
/* Semaphore to synchronize AER handling with xen_pcibk remove/reconfigure
 * ops; we want to avoid a device being removed in the middle of an AER
 * operation.
 */
static DECLARE_RWSEM(pcistub_sem);
module_param_named(hide, pci_devs_to_hide, charp, 0444);

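/*
 * One entry per PCI slot (domain/bus/devfn) that pcistub should claim.
 * pcistub_match() walks this list when pcistub_probe() decides whether
 * to seize a device, and pcistub_seize() adds entries for devices bound
 * via driver_override.
 */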
struct pcistub_device_id {
	struct list_head slot_list;
	int domain;
	unsigned char bus;
	unsigned int devfn;
};
static LIST_HEAD(pcistub_device_ids);
static DEFINE_SPINLOCK(device_ids_lock);

struct pcistub_device {
	struct kref kref;
	struct list_head dev_list;
	spinlock_t lock;

	struct pci_dev *dev;
	struct xen_pcibk_device *pdev;/* non-NULL if struct pci_dev is in use */
};

/* Access to pcistub_devices & seized_devices lists and the initialize_devices
 * flag must be locked with pcistub_devices_lock
 */
static DEFINE_SPINLOCK(pcistub_devices_lock);
static LIST_HEAD(pcistub_devices);

/* wait for device_initcall before initializing our devices
 * (see pcistub_init_devices_late)
 */
static int initialize_devices;
static LIST_HEAD(seized_devices);

static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev)
{
	struct pcistub_device *psdev;

	dev_dbg(&dev->dev, "pcistub_device_alloc\n");

	psdev = kzalloc(sizeof(*psdev), GFP_KERNEL);
	if (!psdev)
		return NULL;

	psdev->dev = pci_dev_get(dev);
	if (!psdev->dev) {
		kfree(psdev);
		return NULL;
	}

	kref_init(&psdev->kref);
	spin_lock_init(&psdev->lock);

	return psdev;
}

/* Don't call this directly as it's called by pcistub_device_put */
static void pcistub_device_release(struct kref *kref)
{
	struct pcistub_device *psdev;
	struct pci_dev *dev;
	struct xen_pcibk_dev_data *dev_data;

	psdev = container_of(kref, struct pcistub_device, kref);
	dev = psdev->dev;
	dev_data = pci_get_drvdata(dev);

	dev_dbg(&dev->dev, "pcistub_device_release\n");

	xen_unregister_device_domain_owner(dev);

	/* Call the reset function that does not take the device lock, as
	 * this is called from "unbind", which already holds the device_lock
	 * mutex.
	 */
	__pci_reset_function_locked(dev);
	if (dev_data &&
	    pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state))
		dev_info(&dev->dev, "Could not reload PCI state\n");
	else
		pci_restore_state(dev);

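	/*
	 * Undo the PHYSDEVOP_prepare_msix issued in pcistub_init_device()
	 * now that the device is no longer set up for assignment.
	 */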
	if (dev->msix_cap) {
		struct physdev_pci_device ppdev = {
			.seg = pci_domain_nr(dev->bus),
			.bus = dev->bus->number,
			.devfn = dev->devfn
		};
		int err = HYPERVISOR_physdev_op(PHYSDEVOP_release_msix,
						&ppdev);

		if (err && err != -ENOSYS)
			dev_warn(&dev->dev, "MSI-X release failed (%d)\n",
				 err);
	}

	/* Disable the device */
	xen_pcibk_reset_device(dev);

	kfree(dev_data);
	pci_set_drvdata(dev, NULL);

	/* Clean-up the device */
	xen_pcibk_config_free_dyn_fields(dev);
	xen_pcibk_config_free_dev(dev);

	pci_clear_dev_assigned(dev);
	pci_dev_put(dev);

	kfree(psdev);
}

static inline void pcistub_device_get(struct pcistub_device *psdev)
{
	kref_get(&psdev->kref);
}

static inline void pcistub_device_put(struct pcistub_device *psdev)
{
	kref_put(&psdev->kref, pcistub_device_release);
}

static struct pcistub_device *pcistub_device_find_locked(int domain, int bus,
							 int slot, int func)
{
	struct pcistub_device *psdev;

	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (psdev->dev != NULL
		    && domain == pci_domain_nr(psdev->dev->bus)
		    && bus == psdev->dev->bus->number
		    && slot == PCI_SLOT(psdev->dev->devfn)
		    && func == PCI_FUNC(psdev->dev->devfn)) {
			return psdev;
		}
	}

	return NULL;
}

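/*
 * Like pcistub_device_find_locked(), but takes the list lock itself and
 * returns the entry with an extra reference held; the caller must drop
 * it with pcistub_device_put().  Returns NULL if the slot is not ours.
 */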
static struct pcistub_device *pcistub_device_find(int domain, int bus,
						  int slot, int func)
{
	struct pcistub_device *psdev;
	unsigned long flags;

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	psdev = pcistub_device_find_locked(domain, bus, slot, func);
	if (psdev)
		pcistub_device_get(psdev);

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
	return psdev;
}

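/*
 * Mark psdev as in use by 'pdev' and return its pci_dev.  The reference
 * taken here is only kept when the claim succeeds; if another
 * xen_pcibk_device already owns the stub device, NULL is returned and
 * the reference is dropped again.
 */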
static struct pci_dev *pcistub_device_get_pci_dev(struct xen_pcibk_device *pdev,
						  struct pcistub_device *psdev)
{
	struct pci_dev *pci_dev = NULL;
	unsigned long flags;

	pcistub_device_get(psdev);

	spin_lock_irqsave(&psdev->lock, flags);
	if (!psdev->pdev) {
		psdev->pdev = pdev;
		pci_dev = psdev->dev;
	}
	spin_unlock_irqrestore(&psdev->lock, flags);

	if (!pci_dev)
		pcistub_device_put(psdev);

	return pci_dev;
}

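/*
 * Look up the device at domain:bus:slot.func and, if pcistub owns it and
 * no other xen_pcibk_device has claimed it yet, hand it to 'pdev'.
 * Returns NULL otherwise.
 */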
struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
					    int domain, int bus,
					    int slot, int func)
{
	struct pcistub_device *psdev;
	struct pci_dev *found_dev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	psdev = pcistub_device_find_locked(domain, bus, slot, func);
	if (psdev)
		found_dev = pcistub_device_get_pci_dev(pdev, psdev);

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
	return found_dev;
}

struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
				    struct pci_dev *dev)
{
	struct pcistub_device *psdev;
	struct pci_dev *found_dev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (psdev->dev == dev) {
			found_dev = pcistub_device_get_pci_dev(pdev, psdev);
			break;
		}
	}

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
	return found_dev;
}

/*
 * Called when:
 * - XenBus state has been reconfigured (pci unplug). See xen_pcibk_remove_device
 * - XenBus state has been disconnected (guest shutdown). See xen_pcibk_xenbus_remove
 * - 'echo BDF > unbind' on pciback module with no guest attached. See pcistub_remove
 * - 'echo BDF > unbind' with a guest still using it. See pcistub_remove
 *
 * As such we have to be careful.
 *
 * To make this easier, the caller has to hold the device lock.
 */
void pcistub_put_pci_dev(struct pci_dev *dev)
{
	struct pcistub_device *psdev, *found_psdev = NULL;
	unsigned long flags;
	struct xen_pcibk_dev_data *dev_data;
	int ret;

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (psdev->dev == dev) {
			found_psdev = psdev;
			break;
		}
	}

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
	if (WARN_ON(!found_psdev))
		return;

	/* Hold this semaphore to avoid breaking the link between pcistub
	 * and xen_pcibk while AER handling is in progress.
	 */
	down_write(&pcistub_sem);
	/* Clean up our device
	 * (so it's ready for the next domain)
	 */
	device_lock_assert(&dev->dev);
	__pci_reset_function_locked(dev);

	dev_data = pci_get_drvdata(dev);
	ret = pci_load_saved_state(dev, dev_data->pci_saved_state);
	if (!ret) {
		/*
		 * The usual sequence is pci_save_state & pci_restore_state
		 * but the guest might have messed the configuration space up.
		 * Use the initial version (when device was bound to us).
		 */
		pci_restore_state(dev);
	} else
		dev_info(&dev->dev, "Could not reload PCI state\n");
	/* This disables the device. */
	xen_pcibk_reset_device(dev);

	/* And clean up our emulated fields. */
	xen_pcibk_config_reset_dev(dev);
	xen_pcibk_config_free_dyn_fields(dev);

	dev_data->allow_interrupt_control = 0;

	xen_unregister_device_domain_owner(dev);

	spin_lock_irqsave(&found_psdev->lock, flags);
	found_psdev->pdev = NULL;
	spin_unlock_irqrestore(&found_psdev->lock, flags);

	pcistub_device_put(found_psdev);
	up_write(&pcistub_sem);
}

static int pcistub_match_one(struct pci_dev *dev,
			     struct pcistub_device_id *pdev_id)
{
	/* Match the specified device by domain, bus, slot, func and also if
	 * any of the device's parent bridges match.
	 */
	for (; dev != NULL; dev = dev->bus->self) {
		if (pci_domain_nr(dev->bus) == pdev_id->domain
		    && dev->bus->number == pdev_id->bus
		    && dev->devfn == pdev_id->devfn)
			return 1;

		/* Sometimes topmost bridge links to itself. */
		if (dev == dev->bus->self)
			break;
	}

	return 0;
}

static int pcistub_match(struct pci_dev *dev)
{
	struct pcistub_device_id *pdev_id;
	unsigned long flags;
	int found = 0;

	spin_lock_irqsave(&device_ids_lock, flags);
	list_for_each_entry(pdev_id, &pcistub_device_ids, slot_list) {
		if (pcistub_match_one(dev, pdev_id)) {
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&device_ids_lock, flags);

	return found;
}

static int pcistub_init_device(struct pci_dev *dev)
{
	struct xen_pcibk_dev_data *dev_data;
	int err = 0;

	dev_dbg(&dev->dev, "initializing...\n");

	/* The PCI backend is not intended to be a module (or to work with
	 * removable PCI devices) yet. If it were, xen_pcibk_config_free()
	 * would need to be called somewhere to free the memory allocated
	 * here and then to call kfree(pci_get_drvdata(psdev->dev)).
	 */
	dev_data = kzalloc(sizeof(*dev_data) + strlen(DRV_NAME "[]")
			   + strlen(pci_name(dev)) + 1, GFP_KERNEL);
	if (!dev_data) {
		err = -ENOMEM;
		goto out;
	}
	pci_set_drvdata(dev, dev_data);

	/*
	 * Setup name for fake IRQ handler. It will only be enabled
	 * once the device is turned on by the guest.
	 */
	sprintf(dev_data->irq_name, DRV_NAME "[%s]", pci_name(dev));

	dev_dbg(&dev->dev, "initializing config\n");

	init_waitqueue_head(&xen_pcibk_aer_wait_queue);
	err = xen_pcibk_config_init_dev(dev);
	if (err)
		goto out;

	/* HACK: Force device (& ACPI) to determine what IRQ it's on - we
	 * must do this here because pcibios_enable_device may specify
	 * the pci device's true irq (and possibly its other resources)
	 * if they differ from what's in the configuration space.
	 * This makes the assumption that the device's resources won't
	 * change after this point (otherwise this code may break!)
	 */
	dev_dbg(&dev->dev, "enabling device\n");
	err = pci_enable_device(dev);
	if (err)
		goto config_release;

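	/*
	 * Let the hypervisor know about the device's MSI-X capability
	 * before the device is exported; the matching
	 * PHYSDEVOP_release_msix is issued in pcistub_device_release().
	 */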
	if (dev->msix_cap) {
		struct physdev_pci_device ppdev = {
			.seg = pci_domain_nr(dev->bus),
			.bus = dev->bus->number,
			.devfn = dev->devfn
		};

		err = HYPERVISOR_physdev_op(PHYSDEVOP_prepare_msix, &ppdev);
		if (err && err != -ENOSYS)
			dev_err(&dev->dev, "MSI-X preparation failed (%d)\n",
				err);
	}

	/* We need the device active to save the state. */
	dev_dbg(&dev->dev, "save state of device\n");
	pci_save_state(dev);
	dev_data->pci_saved_state = pci_store_saved_state(dev);
	if (!dev_data->pci_saved_state)
		dev_err(&dev->dev, "Could not store PCI conf saved state!\n");
	else {
		dev_dbg(&dev->dev, "resetting (FLR, D3, etc) the device\n");
		__pci_reset_function_locked(dev);
		pci_restore_state(dev);
	}
	/* Now disable the device (this also ensures some private device
	 * data is set up before we export)
	 */
	dev_dbg(&dev->dev, "reset device\n");
	xen_pcibk_reset_device(dev);

	pci_set_dev_assigned(dev);
	return 0;

config_release:
	xen_pcibk_config_free_dev(dev);

out:
	pci_set_drvdata(dev, NULL);
	kfree(dev_data);
	return err;
}

/*
 * Because some initialization still happens on
 * devices during fs_initcall, we need to defer
 * full initialization of our devices until
 * device_initcall.
 */
static int __init pcistub_init_devices_late(void)
{
	struct pcistub_device *psdev;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	while (!list_empty(&seized_devices)) {
		psdev = container_of(seized_devices.next,
				     struct pcistub_device, dev_list);
		list_del(&psdev->dev_list);

		spin_unlock_irqrestore(&pcistub_devices_lock, flags);

		err = pcistub_init_device(psdev->dev);
		if (err) {
			dev_err(&psdev->dev->dev,
				"error %d initializing device\n", err);
			kfree(psdev);
			psdev = NULL;
		}

		spin_lock_irqsave(&pcistub_devices_lock, flags);

		if (psdev)
			list_add_tail(&psdev->dev_list, &pcistub_devices);
	}

	initialize_devices = 1;

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);

	return 0;
}

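/*
 * Record 'new' in pcistub_device_ids unless an entry for the same
 * domain/bus/devfn already exists, in which case 'new' is freed.
 */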
static void pcistub_device_id_add_list(struct pcistub_device_id *new,
				       int domain, int bus, unsigned int devfn)
{
	struct pcistub_device_id *pci_dev_id;
	unsigned long flags;
	int found = 0;

	spin_lock_irqsave(&device_ids_lock, flags);

	list_for_each_entry(pci_dev_id, &pcistub_device_ids, slot_list) {
		if (pci_dev_id->domain == domain && pci_dev_id->bus == bus &&
		    pci_dev_id->devfn == devfn) {
			found = 1;
			break;
		}
	}

	if (!found) {
		new->domain = domain;
		new->bus = bus;
		new->devfn = devfn;
		list_add_tail(&new->slot_list, &pcistub_device_ids);
	}

	spin_unlock_irqrestore(&device_ids_lock, flags);

	if (found)
		kfree(new);
}

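/*
 * Take ownership of 'dev'.  If the late initcall has already run, the
 * device is initialized immediately; otherwise it is parked on
 * seized_devices for pcistub_init_devices_late().  On success an
 * optional pci_dev_id is recorded in the slot list; on failure both the
 * id and the stub device are released.
 */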
static int pcistub_seize(struct pci_dev *dev,
			 struct pcistub_device_id *pci_dev_id)
{
	struct pcistub_device *psdev;
	unsigned long flags;
	int err = 0;

	psdev = pcistub_device_alloc(dev);
	if (!psdev) {
		kfree(pci_dev_id);
		return -ENOMEM;
	}

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	if (initialize_devices) {
		spin_unlock_irqrestore(&pcistub_devices_lock, flags);

		/* don't want irqs disabled when calling pcistub_init_device */
		err = pcistub_init_device(psdev->dev);

		spin_lock_irqsave(&pcistub_devices_lock, flags);

		if (!err)
			list_add(&psdev->dev_list, &pcistub_devices);
	} else {
		dev_dbg(&dev->dev, "deferring initialization\n");
		list_add(&psdev->dev_list, &seized_devices);
	}

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);

	if (err) {
		kfree(pci_dev_id);
		pcistub_device_put(psdev);
	} else if (pci_dev_id)
		pcistub_device_id_add_list(pci_dev_id, pci_domain_nr(dev->bus),
					   dev->bus->number, dev->devfn);

	return err;
}

/* Called on 'bind'. This means we must _NOT_ call pci_reset_function or
 * other functions that take the sysfs lock. */
static int pcistub_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int err = 0, match;
	struct pcistub_device_id *pci_dev_id = NULL;

	dev_dbg(&dev->dev, "probing...\n");

	match = pcistub_match(dev);

	if ((dev->driver_override &&
	     !strcmp(dev->driver_override, PCISTUB_DRIVER_NAME)) ||
	    match) {

		if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL
		    && dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
			dev_err(&dev->dev, "can't export pci devices that "
				"don't have a normal (0) or bridge (1) "
				"header type!\n");
			err = -ENODEV;
			goto out;
		}

		if (!match) {
			pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL);
			if (!pci_dev_id) {
				err = -ENOMEM;
				goto out;
			}
		}

		dev_info(&dev->dev, "seizing device\n");
		err = pcistub_seize(dev, pci_dev_id);
	} else
		/* Didn't find the device */
		err = -ENODEV;

out:
	return err;
}

/* Called on 'unbind'. This means we must _NOT_ call pci_reset_function or
 * other functions that take the sysfs lock. */
static void pcistub_remove(struct pci_dev *dev)
{
	struct pcistub_device *psdev, *found_psdev = NULL;
	unsigned long flags;

	dev_dbg(&dev->dev, "removing\n");

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	xen_pcibk_config_quirk_release(dev);

	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (psdev->dev == dev) {
			found_psdev = psdev;
			break;
		}
	}

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);

	if (found_psdev) {
		dev_dbg(&dev->dev, "found device to remove %s\n",
			found_psdev->pdev ? "- in-use" : "");

		if (found_psdev->pdev) {
			int domid = xen_find_device_domain_owner(dev);

			dev_warn(&dev->dev, "****** removing device %s while still in-use by domain %d! ******\n",
				 pci_name(found_psdev->dev), domid);
			dev_warn(&dev->dev, "****** driver domain may still access this device's i/o resources!\n");
			dev_warn(&dev->dev, "****** shutdown driver domain before binding device\n");
			dev_warn(&dev->dev, "****** to other drivers or domains\n");

			/* N.B. This ends up calling pcistub_put_pci_dev which ends up
			 * doing the FLR. */
			xen_pcibk_release_pci_dev(found_psdev->pdev,
						  found_psdev->dev,
						  false /* caller holds the lock. */);
		}

		spin_lock_irqsave(&pcistub_devices_lock, flags);
		list_del(&found_psdev->dev_list);
		spin_unlock_irqrestore(&pcistub_devices_lock, flags);

		/* the final put for releasing from the list */
		pcistub_device_put(found_psdev);
	}
}

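/*
 * Match every PCI device so that pcistub_probe() gets to look at each
 * one; the real filtering (hide list or driver_override) happens there.
 */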
static const struct pci_device_id pcistub_ids[] = {
	{
	 .vendor = PCI_ANY_ID,
	 .device = PCI_ANY_ID,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 },
	{0,},
};

#define PCI_NODENAME_MAX 40
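/*
 * Mark the guest as having failed AER recovery by writing
 * "aerState" = "aerfail" under its PCI backend directory in xenstore;
 * whoever watches that node (normally the toolstack) is expected to
 * shut the domain down.
 */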
static void kill_domain_by_device(struct pcistub_device *psdev)
{
	struct xenbus_transaction xbt;
	int err;
	char nodename[PCI_NODENAME_MAX];

	BUG_ON(!psdev);
	snprintf(nodename, PCI_NODENAME_MAX, "/local/domain/0/backend/pci/%d/0",
		 psdev->pdev->xdev->otherend_id);

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		dev_err(&psdev->dev->dev,
			"error %d when starting xenbus transaction\n", err);
		return;
	}
	/* PV AER handlers will set this flag */
	xenbus_printf(xbt, nodename, "aerState", "aerfail");
	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		dev_err(&psdev->dev->dev,
			"error %d when ending xenbus transaction\n", err);
		return;
	}
}

/* For each AER recovery step (error_detected, mmio_enabled, etc.) the
 * frontend and backend need to cooperate. In xen_pcibk these steps all do a
 * similar job: send a service request and wait for the frontend's response.
 */
static pci_ers_result_t common_process(struct pcistub_device *psdev,
				       pci_channel_state_t state, int aer_cmd,
				       pci_ers_result_t result)
{
	pci_ers_result_t res = result;
	struct xen_pcie_aer_op *aer_op;
	struct xen_pcibk_device *pdev = psdev->pdev;
	struct xen_pci_sharedinfo *sh_info = pdev->sh_info;
	int ret;

	/* with PV AER drivers */
	aer_op = &(sh_info->aer_op);
	aer_op->cmd = aer_cmd;
	/* useful for error_detected callback */
	aer_op->err = state;
	/* pcifront_end BDF */
	ret = xen_pcibk_get_pcifront_dev(psdev->dev, psdev->pdev,
					 &aer_op->domain, &aer_op->bus, &aer_op->devfn);
	if (!ret) {
		dev_err(&psdev->dev->dev, "failed to get pcifront device\n");
		return PCI_ERS_RESULT_NONE;
	}
	wmb();

	dev_dbg(&psdev->dev->dev, "aer_op %x dom %x bus %x devfn %x\n",
		aer_cmd, aer_op->domain, aer_op->bus, aer_op->devfn);
	/* Set a local flag to mark that an AER request is pending; the
	 * xen_pcibk callback uses it to decide whether to check for the
	 * AER service ack signal from pcifront.
	 */
	set_bit(_PCIB_op_pending, (unsigned long *)&pdev->flags);

	/* It is possible that a pcifront conf_read_write request invokes
	 * the callback, which causes a spurious execution of wake_up.
	 * That is harmless and better than taking a spinlock here.
	 */
	set_bit(_XEN_PCIB_active,
		(unsigned long *)&sh_info->flags);
	wmb();
	notify_remote_via_irq(pdev->evtchn_irq);

	/* Enable IRQ to signal "request done". */
	xen_pcibk_lateeoi(pdev, 0);

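	/*
	 * Wait (up to 300 seconds) for pcifront to clear _XEN_PCIB_active,
	 * i.e. to acknowledge that it has handled the AER request.
	 */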
	ret = wait_event_timeout(xen_pcibk_aer_wait_queue,
				 !(test_bit(_XEN_PCIB_active, (unsigned long *)
				 &sh_info->flags)), 300 * HZ);

	/* Enable IRQ for pcifront request if not already active. */
	if (!test_bit(_PDEVF_op_active, &pdev->flags))
		xen_pcibk_lateeoi(pdev, 0);

	if (!ret) {
		if (test_bit(_XEN_PCIB_active,
			(unsigned long *)&sh_info->flags)) {
			dev_err(&psdev->dev->dev,
				"pcifront aer process not responding!\n");
			clear_bit(_XEN_PCIB_active,
				  (unsigned long *)&sh_info->flags);
			aer_op->err = PCI_ERS_RESULT_NONE;
			return res;
		}
	}
	clear_bit(_PCIB_op_pending, (unsigned long *)&pdev->flags);

	res = (pci_ers_result_t)aer_op->err;
	return res;
}

/*
 * xen_pcibk_slot_reset: send the slot_reset request to pcifront in case the
 * device driver can provide this service, and then wait for the pcifront ack.
 * @dev: pointer to the PCI device
 * The return value is used by the AER core's do_recovery policy.
 */
static pci_ers_result_t xen_pcibk_slot_reset(struct pci_dev *dev)
{
	struct pcistub_device *psdev;
	pci_ers_result_t result;

	result = PCI_ERS_RESULT_RECOVERED;
	dev_dbg(&dev->dev, "xen_pcibk_slot_reset(bus:%x,devfn:%x)\n",
		dev->bus->number, dev->devfn);

	down_write(&pcistub_sem);
	psdev = pcistub_device_find(pci_domain_nr(dev->bus),
				    dev->bus->number,
				    PCI_SLOT(dev->devfn),
				    PCI_FUNC(dev->devfn));

	if (!psdev || !psdev->pdev) {
		dev_err(&dev->dev, "device is not found/assigned\n");
		goto end;
	}

	if (!psdev->pdev->sh_info) {
		dev_err(&dev->dev, "device is not connected or owned"
			" by HVM, kill it\n");
		kill_domain_by_device(psdev);
		goto end;
	}

	if (!test_bit(_XEN_PCIB_AERHANDLER,
		(unsigned long *)&psdev->pdev->sh_info->flags)) {
		dev_err(&dev->dev,
			"guest with no AER driver should have been killed\n");
		goto end;
	}
	result = common_process(psdev, 1, XEN_PCI_OP_aer_slotreset, result);

	if (result == PCI_ERS_RESULT_NONE ||
		result == PCI_ERS_RESULT_DISCONNECT) {
		dev_dbg(&dev->dev,
			"No AER slot_reset service or disconnected!\n");
		kill_domain_by_device(psdev);
	}
end:
	if (psdev)
		pcistub_device_put(psdev);
	up_write(&pcistub_sem);
	return result;
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) /* xen_pcibk_mmio_enabled: send the mmio_enabled request to pcifront in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) * case the device driver can provide this service, and then wait for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) * pcifront ack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) * @dev: pointer to the PCI device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) * The return value is used by the AER core's do_recovery policy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) static pci_ers_result_t xen_pcibk_mmio_enabled(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) struct pcistub_device *psdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) pci_ers_result_t result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) result = PCI_ERS_RESULT_RECOVERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) dev_dbg(&dev->dev, "xen_pcibk_mmio_enabled(bus:%x,devfn:%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) dev->bus->number, dev->devfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) down_write(&pcistub_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) psdev = pcistub_device_find(pci_domain_nr(dev->bus),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) dev->bus->number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) PCI_SLOT(dev->devfn),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) PCI_FUNC(dev->devfn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) if (!psdev || !psdev->pdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) dev_err(&dev->dev, "device is not found/assigned\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) if (!psdev->pdev->sh_info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) dev_err(&dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) "device is not connected or owned by HVM, kill it\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) kill_domain_by_device(psdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (!test_bit(_XEN_PCIB_AERHANDLER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) (unsigned long *)&psdev->pdev->sh_info->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) dev_err(&dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) "guest with no AER driver should have been killed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) result = common_process(psdev, 1, XEN_PCI_OP_aer_mmio, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) if (result == PCI_ERS_RESULT_NONE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) result == PCI_ERS_RESULT_DISCONNECT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) dev_dbg(&dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) "No AER mmio_enabled service or disconnected!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) kill_domain_by_device(psdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (psdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) pcistub_device_put(psdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) up_write(&pcistub_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) /* xen_pcibk_error_detected: send the error_detected request to pcifront in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * case the device driver can provide this service, and then wait for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * pcifront ack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * @dev: pointer to the PCI device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * @error: the current PCI connection state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * The return value is used by the AER core's do_recovery policy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) static pci_ers_result_t xen_pcibk_error_detected(struct pci_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) pci_channel_state_t error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) struct pcistub_device *psdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) pci_ers_result_t result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) result = PCI_ERS_RESULT_CAN_RECOVER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) dev_dbg(&dev->dev, "xen_pcibk_error_detected(bus:%x,devfn:%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) dev->bus->number, dev->devfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) down_write(&pcistub_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) psdev = pcistub_device_find(pci_domain_nr(dev->bus),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) dev->bus->number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) PCI_SLOT(dev->devfn),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) PCI_FUNC(dev->devfn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) if (!psdev || !psdev->pdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) dev_err(&dev->dev, "device is not found/assigned\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) if (!psdev->pdev->sh_info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) dev_err(&dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) "device is not connected or owned by HVM, kill it\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) kill_domain_by_device(psdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) /* Guest owns the device but no AER handler is registered, kill the guest */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) if (!test_bit(_XEN_PCIB_AERHANDLER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) (unsigned long *)&psdev->pdev->sh_info->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) dev_dbg(&dev->dev, "guest may have no aer driver, kill it\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) kill_domain_by_device(psdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) result = common_process(psdev, error, XEN_PCI_OP_aer_detected, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (result == PCI_ERS_RESULT_NONE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) result == PCI_ERS_RESULT_DISCONNECT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) dev_dbg(&dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) "No AER error_detected service or disconnected!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) kill_domain_by_device(psdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (psdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) pcistub_device_put(psdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) up_write(&pcistub_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) /* xen_pcibk_error_resume: send the error_resume request to pcifront in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * case the device driver can provide this service, and then wait for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * pcifront ack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) * @dev: pointer to the PCI device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) static void xen_pcibk_error_resume(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) struct pcistub_device *psdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) dev_dbg(&dev->dev, "xen_pcibk_error_resume(bus:%x,devfn:%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) dev->bus->number, dev->devfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) down_write(&pcistub_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) psdev = pcistub_device_find(pci_domain_nr(dev->bus),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) dev->bus->number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) PCI_SLOT(dev->devfn),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) PCI_FUNC(dev->devfn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (!psdev || !psdev->pdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) dev_err(&dev->dev, "device is not found/assigned\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (!psdev->pdev->sh_info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) dev_err(&dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) "device is not connected or owned by HVM, kill it\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) kill_domain_by_device(psdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) if (!test_bit(_XEN_PCIB_AERHANDLER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) (unsigned long *)&psdev->pdev->sh_info->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) dev_err(&dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) "guest with no AER driver should have been killed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) kill_domain_by_device(psdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) common_process(psdev, 1, XEN_PCI_OP_aer_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) PCI_ERS_RESULT_RECOVERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (psdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) pcistub_device_put(psdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) up_write(&pcistub_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) /* AER error handlers for devices owned by xen_pcibk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) static const struct pci_error_handlers xen_pcibk_error_handler = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) .error_detected = xen_pcibk_error_detected,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) .mmio_enabled = xen_pcibk_mmio_enabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) .slot_reset = xen_pcibk_slot_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) .resume = xen_pcibk_error_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) };
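
/*
 * Recovery flow in brief (descriptive only, no behaviour added here): the
 * PCI/AER core normally walks error_detected -> mmio_enabled -> slot_reset
 * -> resume.  Each callback above forwards the matching XEN_PCI_OP_aer_*
 * request to pcifront via common_process() and waits for the frontend's
 * ack; if the guest has no AER handler, or answers with
 * PCI_ERS_RESULT_NONE/DISCONNECT, the offending domain is killed with
 * kill_domain_by_device().
 */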
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) * Note: There is no MODULE_DEVICE_TABLE entry here because this isn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) * for a normal device. I don't want it to be loaded automatically.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) static struct pci_driver xen_pcibk_pci_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) /* The name should be xen_pciback, but until the tools are updated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) * we will keep it as pciback. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) .name = PCISTUB_DRIVER_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) .id_table = pcistub_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) .probe = pcistub_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) .remove = pcistub_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) .err_handler = &xen_pcibk_error_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) static inline int str_to_slot(const char *buf, int *domain, int *bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) int *slot, int *func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) int parsed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) switch (sscanf(buf, " %x:%x:%x.%x %n", domain, bus, slot, func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) &parsed)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) *func = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) sscanf(buf, " %x:%x:%x.* %n", domain, bus, slot, &parsed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) *slot = *func = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) sscanf(buf, " %x:%x:*.* %n", domain, bus, &parsed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) if (parsed && !buf[parsed])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) /* try again without domain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) *domain = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) switch (sscanf(buf, " %x:%x.%x %n", bus, slot, func, &parsed)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) *func = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) sscanf(buf, " %x:%x.* %n", bus, slot, &parsed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) *slot = *func = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) sscanf(buf, " %x:*.* %n", bus, &parsed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (parsed && !buf[parsed])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) }
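
/*
 * Illustrative inputs accepted by str_to_slot(), derived from the sscanf
 * patterns above (the device addresses are examples only):
 *   "0000:03:00.0"  - full domain:bus:slot.func
 *   "0000:03:00.*"  - every function of a slot (func = -1)
 *   "0000:03:*.*"   - every slot on a bus (slot = func = -1)
 *   "03:00.0"       - domain omitted, defaults to 0
 */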
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) static inline int str_to_quirk(const char *buf, int *domain, int *bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) int *slot, int *func, int *reg, int *size, int *mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) int parsed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) sscanf(buf, " %x:%x:%x.%x-%x:%x:%x %n", domain, bus, slot, func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) reg, size, mask, &parsed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if (parsed && !buf[parsed])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) /* try again without domain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) *domain = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) sscanf(buf, " %x:%x.%x-%x:%x:%x %n", bus, slot, func, reg, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) mask, &parsed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (parsed && !buf[parsed])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) }
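
/*
 * str_to_quirk() expects "[domain:]bus:slot.func-reg:size:mask", all fields
 * in hex.  For example (hypothetical device), "0000:03:00.0-e0:2:ff00"
 * yields reg = 0xe0, size = 2, mask = 0xff00; how the mask is interpreted
 * is left to the config-space quirk code.
 */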
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) static int pcistub_device_id_add(int domain, int bus, int slot, int func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) struct pcistub_device_id *pci_dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) int rc = 0, devfn = PCI_DEVFN(slot, func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (slot < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) for (slot = 0; !rc && slot < 32; ++slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) rc = pcistub_device_id_add(domain, bus, slot, func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) if (func < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) for (func = 0; !rc && func < 8; ++func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) rc = pcistub_device_id_add(domain, bus, slot, func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
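/*
 * Validate the fully expanded BDF.  The preprocessor dance in the condition
 * below exists because pci_domains_supported is not exported to modules:
 * where it can be referenced, a non-zero domain is rejected outright when
 * the platform reports no PCI domain support; otherwise only the plain
 * range checks apply.
 */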
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if ((
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) #if !defined(MODULE) /* pci_domains_supported is not being exported */ \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) || !defined(CONFIG_PCI_DOMAINS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) !pci_domains_supported ? domain :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) domain < 0 || domain > 0xffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) || bus < 0 || bus > 0xff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) || PCI_SLOT(devfn) != slot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) || PCI_FUNC(devfn) != func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) if (!pci_dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) pr_debug("wants to seize %04x:%02x:%02x.%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) domain, bus, slot, func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) pcistub_device_id_add_list(pci_dev_id, domain, bus, devfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) static int pcistub_device_id_remove(int domain, int bus, int slot, int func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) struct pcistub_device_id *pci_dev_id, *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) int err = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) spin_lock_irqsave(&device_ids_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) list_for_each_entry_safe(pci_dev_id, t, &pcistub_device_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) slot_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) if (pci_dev_id->domain == domain && pci_dev_id->bus == bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) && (slot < 0 || PCI_SLOT(pci_dev_id->devfn) == slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) && (func < 0 || PCI_FUNC(pci_dev_id->devfn) == func)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) /* Don't break; here because it's possible the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) * slot could be in the list more than once
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) list_del(&pci_dev_id->slot_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) kfree(pci_dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) pr_debug("removed %04x:%02x:%02x.%d from seize list\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) domain, bus, slot, func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) spin_unlock_irqrestore(&device_ids_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) static int pcistub_reg_add(int domain, int bus, int slot, int func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) unsigned int reg, unsigned int size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) unsigned int mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) struct pcistub_device *psdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) struct pci_dev *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) struct config_field *field;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) if (reg > 0xfff || (size < 4 && (mask >> (size * 8))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) psdev = pcistub_device_find(domain, bus, slot, func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if (!psdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) dev = psdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) field = kzalloc(sizeof(*field), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) if (!field) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) field->offset = reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) field->size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) field->mask = mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) field->init = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) field->reset = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) field->release = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) field->clean = xen_pcibk_config_field_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) err = xen_pcibk_config_quirks_add_field(dev, field);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) kfree(field);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) if (psdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) pcistub_device_put(psdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) static ssize_t new_slot_store(struct device_driver *drv, const char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) int domain, bus, slot, func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) err = str_to_slot(buf, &domain, &bus, &slot, &func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) err = pcistub_device_id_add(domain, bus, slot, func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) err = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) static DRIVER_ATTR_WO(new_slot);
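
/*
 * Example usage (path assumes the usual sysfs layout for a PCI driver
 * named "pciback"; the device address is illustrative):
 *   echo "0000:03:00.0" > /sys/bus/pci/drivers/pciback/new_slot
 * marks the device for seizure; the remove_slot attribute below accepts
 * the same slot syntax to undo it.
 */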
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) static ssize_t remove_slot_store(struct device_driver *drv, const char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) int domain, bus, slot, func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) err = str_to_slot(buf, &domain, &bus, &slot, &func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) err = pcistub_device_id_remove(domain, bus, slot, func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) err = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) static DRIVER_ATTR_WO(remove_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) static ssize_t slots_show(struct device_driver *drv, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) struct pcistub_device_id *pci_dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) size_t count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) spin_lock_irqsave(&device_ids_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) list_for_each_entry(pci_dev_id, &pcistub_device_ids, slot_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) if (count >= PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) count += scnprintf(buf + count, PAGE_SIZE - count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) "%04x:%02x:%02x.%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) pci_dev_id->domain, pci_dev_id->bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) PCI_SLOT(pci_dev_id->devfn),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) PCI_FUNC(pci_dev_id->devfn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) spin_unlock_irqrestore(&device_ids_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) static DRIVER_ATTR_RO(slots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) static ssize_t irq_handlers_show(struct device_driver *drv, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) struct pcistub_device *psdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) struct xen_pcibk_dev_data *dev_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) size_t count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) spin_lock_irqsave(&pcistub_devices_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) list_for_each_entry(psdev, &pcistub_devices, dev_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (count >= PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) if (!psdev->dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) dev_data = pci_get_drvdata(psdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) if (!dev_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) count +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) scnprintf(buf + count, PAGE_SIZE - count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) "%s:%s:%sing:%ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) pci_name(psdev->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) dev_data->isr_on ? "on" : "off",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) dev_data->ack_intr ? "ack" : "not ack",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) dev_data->handled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) spin_unlock_irqrestore(&pcistub_devices_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) static DRIVER_ATTR_RO(irq_handlers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) static ssize_t irq_handler_state_store(struct device_driver *drv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) struct pcistub_device *psdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) struct xen_pcibk_dev_data *dev_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) int domain, bus, slot, func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) err = str_to_slot(buf, &domain, &bus, &slot, &func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) psdev = pcistub_device_find(domain, bus, slot, func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (!psdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) err = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) dev_data = pci_get_drvdata(psdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) if (!dev_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) err = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) dev_dbg(&psdev->dev->dev, "%s fake irq handler: %d->%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) dev_data->irq_name, dev_data->isr_on,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) !dev_data->isr_on);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) dev_data->isr_on = !(dev_data->isr_on);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) if (dev_data->isr_on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) dev_data->ack_intr = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (psdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) pcistub_device_put(psdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) err = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) static DRIVER_ATTR_WO(irq_handler_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) static ssize_t quirks_store(struct device_driver *drv, const char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) int domain, bus, slot, func, reg, size, mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) err = str_to_quirk(buf, &domain, &bus, &slot, &func, &reg, &size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) &mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) err = pcistub_reg_add(domain, bus, slot, func, reg, size, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) err = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) static ssize_t quirks_show(struct device_driver *drv, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) struct xen_pcibk_config_quirk *quirk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) struct xen_pcibk_dev_data *dev_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) const struct config_field *field;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) const struct config_field_entry *cfg_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) spin_lock_irqsave(&device_ids_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) list_for_each_entry(quirk, &xen_pcibk_quirks, quirks_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) if (count >= PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) count += scnprintf(buf + count, PAGE_SIZE - count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) "%02x:%02x.%01x\n\t%04x:%04x:%04x:%04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) quirk->pdev->bus->number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) PCI_SLOT(quirk->pdev->devfn),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) PCI_FUNC(quirk->pdev->devfn),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) quirk->devid.vendor, quirk->devid.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) quirk->devid.subvendor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) quirk->devid.subdevice);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) dev_data = pci_get_drvdata(quirk->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) field = cfg_entry->field;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) if (count >= PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) count += scnprintf(buf + count, PAGE_SIZE - count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) "\t\t%08x:%01x:%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) cfg_entry->base_offset +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) field->offset, field->size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) field->mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) spin_unlock_irqrestore(&device_ids_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) static DRIVER_ATTR_RW(quirks);
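
/*
 * Example usage (same hypothetical device and quirk as in the
 * str_to_quirk() comment above):
 *   echo "0000:03:00.0-e0:2:ff00" > /sys/bus/pci/drivers/pciback/quirks
 * registers a config-space quirk via pcistub_reg_add(); reading the
 * attribute dumps the known quirks and their fields.
 */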
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) static ssize_t permissive_store(struct device_driver *drv, const char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) int domain, bus, slot, func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) struct pcistub_device *psdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) struct xen_pcibk_dev_data *dev_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) err = str_to_slot(buf, &domain, &bus, &slot, &func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) psdev = pcistub_device_find(domain, bus, slot, func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) if (!psdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) dev_data = pci_get_drvdata(psdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) /* the driver data for a device should never be null at this point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) if (!dev_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) err = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) goto release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) if (!dev_data->permissive) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) dev_data->permissive = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) /* Let user know that what they're doing could be unsafe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) dev_warn(&psdev->dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) "enabling permissive mode configuration space accesses!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) dev_warn(&psdev->dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) "permissive mode is potentially unsafe!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) pcistub_device_put(psdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) err = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) static ssize_t permissive_show(struct device_driver *drv, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) struct pcistub_device *psdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) struct xen_pcibk_dev_data *dev_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) size_t count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) spin_lock_irqsave(&pcistub_devices_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) list_for_each_entry(psdev, &pcistub_devices, dev_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) if (count >= PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) if (!psdev->dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) dev_data = pci_get_drvdata(psdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) if (!dev_data || !dev_data->permissive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) count +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) pci_name(psdev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) spin_unlock_irqrestore(&pcistub_devices_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) static DRIVER_ATTR_RW(permissive);
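
/*
 * Example usage (illustrative device address):
 *   echo "0000:03:00.0" > /sys/bus/pci/drivers/pciback/permissive
 * switches the device to permissive config-space handling; reading the
 * attribute lists every device currently in that mode.  As the warnings
 * above say, this is potentially unsafe.
 */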
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) static ssize_t allow_interrupt_control_store(struct device_driver *drv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) int domain, bus, slot, func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) struct pcistub_device *psdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) struct xen_pcibk_dev_data *dev_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) err = str_to_slot(buf, &domain, &bus, &slot, &func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) psdev = pcistub_device_find(domain, bus, slot, func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) if (!psdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) dev_data = pci_get_drvdata(psdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) /* the driver data for a device should never be null at this point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) if (!dev_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) err = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) goto release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) dev_data->allow_interrupt_control = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) pcistub_device_put(psdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) err = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) static ssize_t allow_interrupt_control_show(struct device_driver *drv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) struct pcistub_device *psdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) struct xen_pcibk_dev_data *dev_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) size_t count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) spin_lock_irqsave(&pcistub_devices_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) list_for_each_entry(psdev, &pcistub_devices, dev_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) if (count >= PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) if (!psdev->dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) dev_data = pci_get_drvdata(psdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) if (!dev_data || !dev_data->allow_interrupt_control)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) count +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) pci_name(psdev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) spin_unlock_irqrestore(&pcistub_devices_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) static DRIVER_ATTR_RW(allow_interrupt_control);
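
/*
 * Example usage (illustrative device address):
 *   echo "0000:03:00.0" > /sys/bus/pci/drivers/pciback/allow_interrupt_control
 * sets dev_data->allow_interrupt_control for that device, letting the
 * frontend manipulate interrupt-related bits in its config space; reading
 * the attribute lists the devices for which this has been allowed.
 */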
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) static void pcistub_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_new_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) driver_remove_file(&xen_pcibk_pci_driver.driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) &driver_attr_remove_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_slots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_quirks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) driver_remove_file(&xen_pcibk_pci_driver.driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) &driver_attr_permissive);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) driver_remove_file(&xen_pcibk_pci_driver.driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) &driver_attr_allow_interrupt_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) driver_remove_file(&xen_pcibk_pci_driver.driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) &driver_attr_irq_handlers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) driver_remove_file(&xen_pcibk_pci_driver.driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) &driver_attr_irq_handler_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) pci_unregister_driver(&xen_pcibk_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) static int __init pcistub_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) int pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) int domain, bus, slot, func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) int parsed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
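/*
 * The "hide" module parameter is a concatenation of parenthesised slot
 * specifications, parsed below with the same wildcard rules as
 * str_to_slot().  On the kernel command line this looks something like:
 *   xen-pciback.hide=(0000:03:00.0)(04:00.*)
 * (the exact "<module>." prefix depends on how the driver is built; the
 * addresses are examples only).
 */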
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) if (pci_devs_to_hide && *pci_devs_to_hide) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) parsed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) err = sscanf(pci_devs_to_hide + pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) " (%x:%x:%x.%x) %n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) &domain, &bus, &slot, &func, &parsed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) switch (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) func = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) sscanf(pci_devs_to_hide + pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) " (%x:%x:%x.*) %n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) &domain, &bus, &slot, &parsed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) slot = func = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) sscanf(pci_devs_to_hide + pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) " (%x:%x:*.*) %n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) &domain, &bus, &parsed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) if (!parsed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) domain = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) err = sscanf(pci_devs_to_hide + pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) " (%x:%x.%x) %n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) &bus, &slot, &func, &parsed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) switch (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) func = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) sscanf(pci_devs_to_hide + pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) " (%x:%x.*) %n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) &bus, &slot, &parsed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) slot = func = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) sscanf(pci_devs_to_hide + pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) " (%x:*.*) %n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) &bus, &parsed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) if (parsed <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) goto parse_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) err = pcistub_device_id_add(domain, bus, slot, func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) pos += parsed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) } while (pci_devs_to_hide[pos]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) /* If we're the first PCI Device Driver to register, we're the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) * first one to get offered PCI devices as they become
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) * available (and thus we can be the first to grab them)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) err = pci_register_driver(&xen_pcibk_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) err = driver_create_file(&xen_pcibk_pci_driver.driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) &driver_attr_new_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) err = driver_create_file(&xen_pcibk_pci_driver.driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) &driver_attr_remove_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) err = driver_create_file(&xen_pcibk_pci_driver.driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) &driver_attr_slots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) err = driver_create_file(&xen_pcibk_pci_driver.driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) &driver_attr_quirks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) err = driver_create_file(&xen_pcibk_pci_driver.driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) &driver_attr_permissive);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) err = driver_create_file(&xen_pcibk_pci_driver.driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) &driver_attr_allow_interrupt_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) err = driver_create_file(&xen_pcibk_pci_driver.driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) &driver_attr_irq_handlers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) err = driver_create_file(&xen_pcibk_pci_driver.driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) &driver_attr_irq_handler_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) pcistub_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) parse_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) pr_err("Error parsing pci_devs_to_hide at \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) pci_devs_to_hide + pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) #ifndef MODULE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) * fs_initcall happens before device_initcall,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) * so xen_pcibk *should* get called first (because we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) * want to claim any device before other drivers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) * get a chance, by being the first PCI device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) * driver to register).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) fs_initcall(pcistub_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) #ifdef CONFIG_PCI_IOV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) static struct pcistub_device *find_vfs(const struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) struct pcistub_device *psdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) bool found = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) spin_lock_irqsave(&pcistub_devices_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) list_for_each_entry(psdev, &pcistub_devices, dev_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) if (!psdev->pdev && psdev->dev != pdev &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)     pci_physfn(psdev->dev) == pdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) found = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) spin_unlock_irqrestore(&pcistub_devices_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) if (found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) return psdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
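/*
 * Bus notifier callback: when the driver of an SR-IOV physical function
 * is about to be unbound (which typically tears its virtual functions
 * down as well), detach pciback from every VF of that PF that is not
 * currently exported to a guest, so the VFs can be removed cleanly.
 */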
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) static int pci_stub_notifier(struct notifier_block *nb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) unsigned long action, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) struct device *dev = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) const struct pci_dev *pdev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) if (action != BUS_NOTIFY_UNBIND_DRIVER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) if (!pdev->is_physfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) struct pcistub_device *psdev = find_vfs(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) if (!psdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) device_release_driver(&psdev->dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) static struct notifier_block pci_stub_nb = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) .notifier_call = pci_stub_notifier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
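/*
 * Combined entry point: only meaningful in the initial domain.  Sets up
 * config-space emulation, registers the stub driver (directly when
 * built as a module, via fs_initcall() otherwise), then registers the
 * xenbus backend.  With SR-IOV support it also installs pci_stub_nb so
 * that PF driver unbinds release any unused VFs held by pciback.
 */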
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) static int __init xen_pcibk_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) if (!xen_initial_domain())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) err = xen_pcibk_config_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
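	/*
	 * When built into the kernel, pcistub_init() has already run via
	 * fs_initcall(); as a module it must be called explicitly here.
	 */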
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) #ifdef MODULE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) err = pcistub_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) pcistub_init_devices_late();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) err = xen_pcibk_xenbus_register();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) pcistub_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) #ifdef CONFIG_PCI_IOV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) bus_register_notifier(&pci_bus_type, &pci_stub_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
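/*
 * Tear down in roughly the reverse order of xen_pcibk_init(): stop
 * watching for PF driver unbinds, unregister the xenbus backend, then
 * unregister the stub driver and release the devices it holds.
 */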
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) static void __exit xen_pcibk_cleanup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) #ifdef CONFIG_PCI_IOV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) bus_unregister_notifier(&pci_bus_type, &pci_stub_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) xen_pcibk_xenbus_unregister();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) pcistub_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) module_init(xen_pcibk_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) module_exit(xen_pcibk_cleanup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) MODULE_LICENSE("Dual BSD/GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) MODULE_ALIAS("xen-backend:pci");