/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _DRIVERS_VIRTIO_VIRTIO_PCI_COMMON_H
#define _DRIVERS_VIRTIO_VIRTIO_PCI_COMMON_H
/*
 * Virtio PCI driver - APIs for common functionality for all device versions
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>

struct virtio_pci_vq_info {
	/* the actual virtqueue */
	struct virtqueue *vq;

	/* the list node for the virtqueues list */
	struct list_head node;

	/* MSI-X vector (or none) */
	unsigned msix_vector;
};

/* Our device structure */
struct virtio_pci_device {
	struct virtio_device vdev;
	struct pci_dev *pci_dev;

	/* In legacy mode, these two point to within ->legacy. */
	/* Where to read and clear interrupt */
	u8 __iomem *isr;

	/* Modern only fields */
	/* The IO mapping for the PCI config space (non-legacy mode) */
	struct virtio_pci_common_cfg __iomem *common;
	/* Device-specific data (non-legacy mode) */
	void __iomem *device;
	/* Base of vq notifications (non-legacy mode). */
	void __iomem *notify_base;

	/* So we can sanity-check accesses. */
	size_t notify_len;
	size_t device_len;

	/* Capability for when we need to map notifications per-vq. */
	int notify_map_cap;

	/* Multiply queue_notify_off by this value. (non-legacy mode). */
	u32 notify_offset_multiplier;
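	/*
	 * Illustrative sketch only (follows the virtio 1.x spec, not a
	 * member here): the notify address of queue @index is derived
	 * roughly as
	 *
	 *	iowrite16(index, &common->queue_select);
	 *	off = ioread16(&common->queue_notify_off);
	 *	addr = notify_base + off * notify_offset_multiplier;
	 */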

	/* Bitmask of the BARs referenced by modern capabilities; used
	 * when requesting/releasing the device's PCI regions. */
	int modern_bars;

	/* Legacy only field */
	/* the IO mapping for the PCI config space */
	void __iomem *ioaddr;

	/* a list of queues so we can dispatch IRQs */
	spinlock_t lock;
	struct list_head virtqueues;

	/* array of all queues for house-keeping */
	struct virtio_pci_vq_info **vqs;

	/* MSI-X support */
	int msix_enabled;
	int intx_enabled;
	cpumask_var_t *msix_affinity_masks;
	/* Name strings for interrupts. This size should be enough,
	 * and I'm too lazy to allocate each name separately. */
	char (*msix_names)[256];
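	/* Illustrative only: the common code typically formats these as
	 * "<device>-config", "<device>-virtqueues" (shared vector) or
	 * "<device>-<vq name>" (per-vq vectors). */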
	/* Number of available vectors */
	unsigned msix_vectors;
	/* Vectors allocated, excluding per-vq vectors if any */
	unsigned msix_used_vectors;

	/* Whether we have vector per vq */
	bool per_vq_vectors;

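	/* Hooks provided by the legacy/modern transport code; they are
	 * filled in during the corresponding probe path. */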
	struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev,
				      struct virtio_pci_vq_info *info,
				      unsigned idx,
				      void (*callback)(struct virtqueue *vq),
				      const char *name,
				      bool ctx,
				      u16 msix_vec);
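	/* Undo setup_vq(): tear down and free the backend state for one vq */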
	void (*del_vq)(struct virtio_pci_vq_info *info);

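	/* Tell the device which MSI-X vector to use for config-change
	 * interrupts; returns the vector the device acknowledged
	 * (VIRTIO_MSI_NO_VECTOR on failure). */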
	u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector);
};

/* Constants for MSI-X */
/* Use the first vector for configuration changes, the second and the rest
 * for virtqueues. Thus, we need at least 2 vectors for MSI-X. */
enum {
	VP_MSIX_CONFIG_VECTOR = 0,
	VP_MSIX_VQ_VECTOR = 1,
};
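
/*
 * Illustrative only: with per_vq_vectors the common code assigns vector 0
 * to config changes and roughly vector 1 + i to the i-th virtqueue that has
 * a callback; with a shared vector, all virtqueues use VP_MSIX_VQ_VECTOR.
 */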

/* Convert a generic virtio device to our structure */
static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
{
	return container_of(vdev, struct virtio_pci_device, vdev);
}
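
/*
 * Typical use inside the virtio_config_ops callbacks (illustrative):
 *
 *	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 */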

/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev);
/* the notify function used when creating a virt queue */
bool vp_notify(struct virtqueue *vq);
/* the config->del_vqs() implementation */
void vp_del_vqs(struct virtio_device *vdev);
/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[], const bool *ctx,
		struct irq_affinity *desc);
const char *vp_bus_name(struct virtio_device *vdev);

/* Setup the affinity for a virtqueue:
 * - force the affinity for per vq vector
 * - OR over all affinities for shared MSI
 * - ignore the affinity request if we're using INTX
 */
int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask);

const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index);

#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
int virtio_pci_legacy_probe(struct virtio_pci_device *);
void virtio_pci_legacy_remove(struct virtio_pci_device *);
#else
static inline int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
{
	return -ENODEV;
}
static inline void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
{
}
#endif
int virtio_pci_modern_probe(struct virtio_pci_device *);
void virtio_pci_modern_remove(struct virtio_pci_device *);

#endif