// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Backend - Provides restricted access to the real PCI bus topology
 * to the frontend
 *
 * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 */

#include <linux/list.h>
#include <linux/pci.h>
#include <linux/mutex.h>
#include "pciback.h"

struct passthrough_dev_data {
	/* Access to dev_list must be protected by lock */
	struct list_head dev_list;
	struct mutex lock;
};

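/*
 * Look up the exported pci_dev matching domain:bus:devfn.  Walks dev_list
 * under dev_data->lock and returns the matching device, or NULL if that
 * slot is not being passed through.
 */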
static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
					       unsigned int domain,
					       unsigned int bus,
					       unsigned int devfn)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry;
	struct pci_dev *dev = NULL;

	mutex_lock(&dev_data->lock);

	list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
		if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus)
		    && bus == (unsigned int)dev_entry->dev->bus->number
		    && devfn == dev_entry->dev->devfn) {
			dev = dev_entry->dev;
			break;
		}
	}

	mutex_unlock(&dev_data->lock);

	return dev;
}

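/*
 * Add @dev to the passthrough list and publish it to the frontend through
 * @publish_cb.  The list insertion is done under dev_data->lock; the
 * publish callback itself runs without the lock held.
 */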
static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
				   struct pci_dev *dev,
				   int devid, publish_pci_dev_cb publish_cb)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry;
	unsigned int domain, bus, devfn;
	int err;

	dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
	if (!dev_entry)
		return -ENOMEM;
	dev_entry->dev = dev;

	mutex_lock(&dev_data->lock);
	list_add_tail(&dev_entry->list, &dev_data->dev_list);
	mutex_unlock(&dev_data->lock);

	/* Publish this device. */
	domain = (unsigned int)pci_domain_nr(dev->bus);
	bus = (unsigned int)dev->bus->number;
	devfn = dev->devfn;
	err = publish_cb(pdev, domain, bus, devfn, devid);

	return err;
}

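/*
 * Remove @dev from the passthrough list and hand it back to pcistub.
 * When @lock is set, the device lock is taken around
 * pcistub_put_pci_dev().
 */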
static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
					struct pci_dev *dev, bool lock)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry, *t;
	struct pci_dev *found_dev = NULL;

	mutex_lock(&dev_data->lock);

	list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
		if (dev_entry->dev == dev) {
			list_del(&dev_entry->list);
			found_dev = dev_entry->dev;
			kfree(dev_entry);
		}
	}

	mutex_unlock(&dev_data->lock);

	if (found_dev) {
		if (lock)
			device_lock(&found_dev->dev);
		pcistub_put_pci_dev(found_dev);
		if (lock)
			device_unlock(&found_dev->dev);
	}
}

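/*
 * Allocate the per-pdev passthrough state: an empty device list and the
 * mutex that protects it.
 */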
static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
	struct passthrough_dev_data *dev_data;

	dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return -ENOMEM;

	mutex_init(&dev_data->lock);

	INIT_LIST_HEAD(&dev_data->dev_list);

	pdev->pci_dev_data = dev_data;

	return 0;
}

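/*
 * Publish a root (domain:bus) via @publish_root_cb for every exported
 * device that has no exported parent bridge, so only the topmost exported
 * devices announce their bus as a root.
 */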
static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
					 publish_pci_root_cb publish_root_cb)
{
	int err = 0;
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry, *e;
	struct pci_dev *dev;
	int found;
	unsigned int domain, bus;

	mutex_lock(&dev_data->lock);

	list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
		/* Only publish this device as a root if none of its
		 * parent bridges are exported
		 */
		found = 0;
		dev = dev_entry->dev->bus->self;
		for (; !found && dev != NULL; dev = dev->bus->self) {
			list_for_each_entry(e, &dev_data->dev_list, list) {
				if (dev == e->dev) {
					found = 1;
					break;
				}
			}
		}

		domain = (unsigned int)pci_domain_nr(dev_entry->dev->bus);
		bus = (unsigned int)dev_entry->dev->bus->number;

		if (!found) {
			err = publish_root_cb(pdev, domain, bus);
			if (err)
				break;
		}
	}

	mutex_unlock(&dev_data->lock);

	return err;
}

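/*
 * Free all passthrough state for @pdev: give every listed device back to
 * pcistub (under its device lock) and release the list and dev_data.
 * dev_data->lock is not taken here, so this relies on there being no
 * remaining concurrent users of the list.
 */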
static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry, *t;

	list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
		struct pci_dev *dev = dev_entry->dev;
		list_del(&dev_entry->list);
		device_lock(&dev->dev);
		pcistub_put_pci_dev(dev);
		device_unlock(&dev->dev);
		kfree(dev_entry);
	}

	kfree(dev_data);
	pdev->pci_dev_data = NULL;
}

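/*
 * Report the coordinates under which @pcidev is exposed to the frontend.
 * In passthrough mode these are simply the real domain/bus/devfn; the
 * function always reports success (returns 1).
 */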
static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
					struct xen_pcibk_device *pdev,
					unsigned int *domain, unsigned int *bus,
					unsigned int *devfn)
{
	*domain = pci_domain_nr(pcidev->bus);
	*bus = pcidev->bus->number;
	*devfn = pcidev->devfn;
	return 1;
}

const struct xen_pcibk_backend xen_pcibk_passthrough_backend = {
	.name = "passthrough",
	.init = __xen_pcibk_init_devices,
	.free = __xen_pcibk_release_devices,
	.find = __xen_pcibk_get_pcifront_dev,
	.publish = __xen_pcibk_publish_pci_roots,
	.release = __xen_pcibk_release_pci_dev,
	.add = __xen_pcibk_add_pci_dev,
	.get = __xen_pcibk_get_pci_dev,
};