// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio PCI driver - modern (virtio 1.0) device support
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 */

#include <linux/delay.h>
#define VIRTIO_PCI_NO_LEGACY
#define VIRTIO_RING_NO_LEGACY
#include "virtio_pci_common.h"

/*
 * Type-safe wrappers for io accesses.
 * Use these to enforce at compile time the following spec requirement:
 *
 * The driver MUST access each field using the “natural” access
 * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
 * for 16-bit fields and 8-bit accesses for 8-bit fields.
 */
static inline u8 vp_ioread8(const u8 __iomem *addr)
{
	return ioread8(addr);
}

static inline u16 vp_ioread16(const __le16 __iomem *addr)
{
	return ioread16(addr);
}

static inline u32 vp_ioread32(const __le32 __iomem *addr)
{
	return ioread32(addr);
}

static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
{
	iowrite8(value, addr);
}

static inline void vp_iowrite16(u16 value, __le16 __iomem *addr)
{
	iowrite16(value, addr);
}

static inline void vp_iowrite32(u32 value, __le32 __iomem *addr)
{
	iowrite32(value, addr);
}

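/*
 * There is no portable 64-bit io accessor, so 64-bit fields are written
 * as two 32-bit halves, low word first. My reading of the virtio spec is
 * that the device only acts on these values at a well-defined later point
 * (e.g. when the queue is enabled), so the non-atomic write is safe here;
 * see the spec for the authoritative ordering rules.
 */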
static void vp_iowrite64_twopart(u64 val,
				 __le32 __iomem *lo, __le32 __iomem *hi)
{
	vp_iowrite32((u32)val, lo);
	vp_iowrite32(val >> 32, hi);
}

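/*
 * map_capability - map part of the BAR region described by the vendor
 * capability at config space offset @off. @minlen is the minimum number of
 * bytes the caller needs, @align the required alignment of the mapped
 * offset, and @start/@size select a window within the capability; the
 * length actually mapped is returned via @len when non-NULL. Returns the
 * mapped address, or NULL on failure (after logging the reason).
 */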
static void __iomem *map_capability(struct pci_dev *dev, int off,
				    size_t minlen,
				    u32 align,
				    u32 start, u32 size,
				    size_t *len)
{
	u8 bar;
	u32 offset, length;
	void __iomem *p;

	pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap,
						 bar),
			     &bar);
	pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset),
			      &offset);
	pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
			      &length);

	if (length <= start) {
		dev_err(&dev->dev,
			"virtio_pci: bad capability len %u (>%u expected)\n",
			length, start);
		return NULL;
	}

	if (length - start < minlen) {
		dev_err(&dev->dev,
			"virtio_pci: bad capability len %u (>=%zu expected)\n",
			length, minlen);
		return NULL;
	}

	length -= start;

	if (start + offset < offset) {
		dev_err(&dev->dev,
			"virtio_pci: map wrap-around %u+%u\n",
			start, offset);
		return NULL;
	}

	offset += start;

	if (offset & (align - 1)) {
		dev_err(&dev->dev,
			"virtio_pci: offset %u not aligned to %u\n",
			offset, align);
		return NULL;
	}

	if (length > size)
		length = size;

	if (len)
		*len = length;

	if (minlen + offset < minlen ||
	    minlen + offset > pci_resource_len(dev, bar)) {
		dev_err(&dev->dev,
			"virtio_pci: map virtio %zu@%u "
			"out of range on bar %i length %lu\n",
			minlen, offset,
			bar, (unsigned long)pci_resource_len(dev, bar));
		return NULL;
	}

	p = pci_iomap_range(dev, bar, offset, length);
	if (!p)
		dev_err(&dev->dev,
			"virtio_pci: unable to map virtio %u@%u on bar %i\n",
			length, offset, bar);
	return p;
}

/* virtio config->get_features() implementation */
static u64 vp_get_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u64 features;

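	/*
	 * Feature bits are exposed through a 32-bit window: select dword 0
	 * for bits 0-31, then dword 1 for bits 32-63.
	 */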
	vp_iowrite32(0, &vp_dev->common->device_feature_select);
	features = vp_ioread32(&vp_dev->common->device_feature);
	vp_iowrite32(1, &vp_dev->common->device_feature_select);
	features |= ((u64)vp_ioread32(&vp_dev->common->device_feature) << 32);

	return features;
}

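/*
 * Accept transport-specific features: currently only VIRTIO_F_SR_IOV, and
 * only if the device actually exposes an SR-IOV extended capability.
 */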
static void vp_transport_features(struct virtio_device *vdev, u64 features)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct pci_dev *pci_dev = vp_dev->pci_dev;

	if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
	    pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
		__virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
}

/* virtio config->finalize_features() implementation */
static int vp_finalize_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u64 features = vdev->features;

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Give virtio_pci a chance to accept features. */
	vp_transport_features(vdev, features);

	if (!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev, "virtio: device uses modern interface "
			"but does not have VIRTIO_F_VERSION_1\n");
		return -EINVAL;
	}

	vp_iowrite32(0, &vp_dev->common->guest_feature_select);
	vp_iowrite32((u32)vdev->features, &vp_dev->common->guest_feature);
	vp_iowrite32(1, &vp_dev->common->guest_feature_select);
	vp_iowrite32(vdev->features >> 32, &vp_dev->common->guest_feature);

	return 0;
}

/* virtio config->get() implementation */
static void vp_get(struct virtio_device *vdev, unsigned offset,
		   void *buf, unsigned len)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u8 b;
	__le16 w;
	__le32 l;

	BUG_ON(offset + len > vp_dev->device_len);

	switch (len) {
	case 1:
		b = ioread8(vp_dev->device + offset);
		memcpy(buf, &b, sizeof b);
		break;
	case 2:
		w = cpu_to_le16(ioread16(vp_dev->device + offset));
		memcpy(buf, &w, sizeof w);
		break;
	case 4:
		l = cpu_to_le32(ioread32(vp_dev->device + offset));
		memcpy(buf, &l, sizeof l);
		break;
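	/*
	 * 64-bit fields have no atomic accessor; they are read as two
	 * 32-bit halves. Tearing across the halves is detected by callers
	 * via the config generation counter (see vp_generation()).
	 */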
	case 8:
		l = cpu_to_le32(ioread32(vp_dev->device + offset));
		memcpy(buf, &l, sizeof l);
		l = cpu_to_le32(ioread32(vp_dev->device + offset + sizeof l));
		memcpy(buf + sizeof l, &l, sizeof l);
		break;
	default:
		BUG();
	}
}

/* the config->set() implementation. it's symmetric to the config->get()
 * implementation */
static void vp_set(struct virtio_device *vdev, unsigned offset,
		   const void *buf, unsigned len)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u8 b;
	__le16 w;
	__le32 l;

	BUG_ON(offset + len > vp_dev->device_len);

	switch (len) {
	case 1:
		memcpy(&b, buf, sizeof b);
		iowrite8(b, vp_dev->device + offset);
		break;
	case 2:
		memcpy(&w, buf, sizeof w);
		iowrite16(le16_to_cpu(w), vp_dev->device + offset);
		break;
	case 4:
		memcpy(&l, buf, sizeof l);
		iowrite32(le32_to_cpu(l), vp_dev->device + offset);
		break;
	case 8:
		memcpy(&l, buf, sizeof l);
		iowrite32(le32_to_cpu(l), vp_dev->device + offset);
		memcpy(&l, buf + sizeof l, sizeof l);
		iowrite32(le32_to_cpu(l), vp_dev->device + offset + sizeof l);
		break;
	default:
		BUG();
	}
}

static u32 vp_generation(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return vp_ioread8(&vp_dev->common->config_generation);
}

/* config->{get,set}_status() implementations */
static u8 vp_get_status(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return vp_ioread8(&vp_dev->common->device_status);
}

static void vp_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* We should never be setting status to 0. */
	BUG_ON(status == 0);
	vp_iowrite8(status, &vp_dev->common->device_status);
}

static void vp_reset(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* 0 status means a reset. */
	vp_iowrite8(0, &vp_dev->common->device_status);
	/* After writing 0 to device_status, the driver MUST wait for a read of
	 * device_status to return 0 before reinitializing the device.
	 * This will flush out the status write, and flush in device writes,
	 * including MSI-X interrupts, if any.
	 */
	while (vp_ioread8(&vp_dev->common->device_status))
		msleep(1);
	/* Flush pending VQ/configuration callbacks. */
	vp_synchronize_vectors(vdev);
}

static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
	/* Setup the vector used for configuration events */
	vp_iowrite16(vector, &vp_dev->common->msix_config);
	/* Verify we had enough resources to assign the vector */
	/* Will also flush the write out to device */
	return vp_ioread16(&vp_dev->common->msix_config);
}

static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
				  struct virtio_pci_vq_info *info,
				  unsigned index,
				  void (*callback)(struct virtqueue *vq),
				  const char *name,
				  bool ctx,
				  u16 msix_vec)
{
	struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;
	struct virtqueue *vq;
	u16 num, off;
	int err;

	if (index >= vp_ioread16(&cfg->num_queues))
		return ERR_PTR(-ENOENT);

	/* Select the queue we're interested in */
	vp_iowrite16(index, &cfg->queue_select);

	/* Check if queue is either not available or already active. */
	num = vp_ioread16(&cfg->queue_size);
	if (!num || vp_ioread16(&cfg->queue_enable))
		return ERR_PTR(-ENOENT);

	if (num & (num - 1)) {
		dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", num);
		return ERR_PTR(-EINVAL);
	}

	/* get offset of notification word for this vq */
	off = vp_ioread16(&cfg->queue_notify_off);

	info->msix_vector = msix_vec;

	/* create the vring */
	vq = vring_create_virtqueue(index, num,
				    SMP_CACHE_BYTES, &vp_dev->vdev,
				    true, true, ctx,
				    vp_notify, callback, name);
	if (!vq)
		return ERR_PTR(-ENOMEM);

	/* activate the queue */
	vp_iowrite16(virtqueue_get_vring_size(vq), &cfg->queue_size);
	vp_iowrite64_twopart(virtqueue_get_desc_addr(vq),
			     &cfg->queue_desc_lo, &cfg->queue_desc_hi);
	vp_iowrite64_twopart(virtqueue_get_avail_addr(vq),
			     &cfg->queue_avail_lo, &cfg->queue_avail_hi);
	vp_iowrite64_twopart(virtqueue_get_used_addr(vq),
			     &cfg->queue_used_lo, &cfg->queue_used_hi);

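	/*
	 * Two ways to reach the notification register: if the whole notify
	 * area was mapped up front at probe time, compute the address within
	 * it; otherwise map just this queue's 2-byte notify register now.
	 */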
	if (vp_dev->notify_base) {
		/* offset should not wrap */
		if ((u64)off * vp_dev->notify_offset_multiplier + 2
		    > vp_dev->notify_len) {
			dev_warn(&vp_dev->pci_dev->dev,
				 "bad notification offset %u (x %u) "
				 "for queue %u > %zd",
				 off, vp_dev->notify_offset_multiplier,
				 index, vp_dev->notify_len);
			err = -EINVAL;
			goto err_map_notify;
		}
		vq->priv = (void __force *)vp_dev->notify_base +
			   off * vp_dev->notify_offset_multiplier;
	} else {
		vq->priv = (void __force *)map_capability(vp_dev->pci_dev,
					  vp_dev->notify_map_cap, 2, 2,
					  off * vp_dev->notify_offset_multiplier, 2,
					  NULL);
	}

	if (!vq->priv) {
		err = -ENOMEM;
		goto err_map_notify;
	}

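	/*
	 * Request an MSI-X vector for the queue, then read the register
	 * back: the device signals that it could not assign the vector by
	 * returning VIRTIO_MSI_NO_VECTOR.
	 */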
	if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
		vp_iowrite16(msix_vec, &cfg->queue_msix_vector);
		msix_vec = vp_ioread16(&cfg->queue_msix_vector);
		if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
			err = -EBUSY;
			goto err_assign_vector;
		}
	}

	return vq;

err_assign_vector:
	if (!vp_dev->notify_base)
		pci_iounmap(vp_dev->pci_dev, (void __iomem __force *)vq->priv);
err_map_notify:
	vring_del_virtqueue(vq);
	return ERR_PTR(err);
}

static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			      struct virtqueue *vqs[],
			      vq_callback_t *callbacks[],
			      const char * const names[], const bool *ctx,
			      struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq;
	int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, ctx, desc);

	if (rc)
		return rc;

	/* Select and activate all queues. Has to be done last: once we do
	 * this, there's no way to go back except reset.
	 */
	list_for_each_entry(vq, &vdev->vqs, list) {
		vp_iowrite16(vq->index, &vp_dev->common->queue_select);
		vp_iowrite16(1, &vp_dev->common->queue_enable);
	}

	return 0;
}

static void del_vq(struct virtio_pci_vq_info *info)
{
	struct virtqueue *vq = info->vq;
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);

	vp_iowrite16(vq->index, &vp_dev->common->queue_select);

	if (vp_dev->msix_enabled) {
		vp_iowrite16(VIRTIO_MSI_NO_VECTOR,
			     &vp_dev->common->queue_msix_vector);
		/* Flush the write out to device */
		vp_ioread16(&vp_dev->common->queue_msix_vector);
	}

	if (!vp_dev->notify_base)
		pci_iounmap(vp_dev->pci_dev, (void __force __iomem *)vq->priv);

	vring_del_virtqueue(vq);
}

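/*
 * Walk the vendor capability list looking for a shared memory capability
 * (VIRTIO_PCI_CAP_SHARED_MEMORY_CFG) whose id matches @required_id. On
 * success, return the capability's config space offset and fill in the BAR
 * plus the 64-bit offset/length of the region; return 0 if none is found.
 */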
static int virtio_pci_find_shm_cap(struct pci_dev *dev, u8 required_id,
				   u8 *bar, u64 *offset, u64 *len)
{
	int pos;

	for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); pos > 0;
	     pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
		u8 type, cap_len, id;
		u32 tmp32;
		u64 res_offset, res_length;

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 cfg_type), &type);
		if (type != VIRTIO_PCI_CAP_SHARED_MEMORY_CFG)
			continue;

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 cap_len), &cap_len);
		if (cap_len != sizeof(struct virtio_pci_cap64)) {
			dev_err(&dev->dev, "%s: shm cap with bad size offset:"
				" %d size: %d\n", __func__, pos, cap_len);
			continue;
		}

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 id), &id);
		if (id != required_id)
			continue;

		/* Type and ID match, looks good */
		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 bar), bar);

		/* Read the lower 32bit of length and offset */
		pci_read_config_dword(dev, pos + offsetof(struct virtio_pci_cap,
							  offset), &tmp32);
		res_offset = tmp32;
		pci_read_config_dword(dev, pos + offsetof(struct virtio_pci_cap,
							  length), &tmp32);
		res_length = tmp32;

		/* and now the top half */
		pci_read_config_dword(dev,
				      pos + offsetof(struct virtio_pci_cap64,
						     offset_hi), &tmp32);
		res_offset |= ((u64)tmp32) << 32;
		pci_read_config_dword(dev,
				      pos + offsetof(struct virtio_pci_cap64,
						     length_hi), &tmp32);
		res_length |= ((u64)tmp32) << 32;

		*offset = res_offset;
		*len = res_length;

		return pos;
	}
	return 0;
}

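/*
 * virtio config->get_shm_region() implementation: translate a shared
 * memory capability into a physical address range, validating that it
 * fits inside the BAR it claims to live in.
 */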
static bool vp_get_shm_region(struct virtio_device *vdev,
			      struct virtio_shm_region *region, u8 id)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct pci_dev *pci_dev = vp_dev->pci_dev;
	u8 bar;
	u64 offset, len;
	phys_addr_t phys_addr;
	size_t bar_len;

	if (!virtio_pci_find_shm_cap(pci_dev, id, &bar, &offset, &len))
		return false;

	phys_addr = pci_resource_start(pci_dev, bar);
	bar_len = pci_resource_len(pci_dev, bar);

	if ((offset + len) < offset) {
		dev_err(&pci_dev->dev, "%s: cap offset+len overflow detected\n",
			__func__);
		return false;
	}

	if (offset + len > bar_len) {
		dev_err(&pci_dev->dev, "%s: bar shorter than cap offset+len\n",
			__func__);
		return false;
	}

	region->len = len;
	region->addr = (u64) phys_addr + offset;

	return true;
}

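/*
 * Two config ops tables: the "nodev" variant is used when the device has
 * no device-specific config area, so ->get()/->set() must never be called.
 */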
static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
	.get = NULL,
	.set = NULL,
	.generation = vp_generation,
	.get_status = vp_get_status,
	.set_status = vp_set_status,
	.reset = vp_reset,
	.find_vqs = vp_modern_find_vqs,
	.del_vqs = vp_del_vqs,
	.get_features = vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name = vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
	.get_vq_affinity = vp_get_vq_affinity,
	.get_shm_region = vp_get_shm_region,
};

static const struct virtio_config_ops virtio_pci_config_ops = {
	.get = vp_get,
	.set = vp_set,
	.generation = vp_generation,
	.get_status = vp_get_status,
	.set_status = vp_set_status,
	.reset = vp_reset,
	.find_vqs = vp_modern_find_vqs,
	.del_vqs = vp_del_vqs,
	.get_features = vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name = vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
	.get_vq_affinity = vp_get_vq_affinity,
	.get_shm_region = vp_get_shm_region,
};

/**
 * virtio_pci_find_capability - walk capabilities to find device info.
 * @dev: the pci device
 * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
 * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
 * @bars: the bitmask of BARs
 *
 * Returns offset of the capability, or 0.
 */
static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
					     u32 ioresource_types, int *bars)
{
	int pos;

	for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
	     pos > 0;
	     pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
		u8 type, bar;

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 cfg_type),
				     &type);
		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 bar),
				     &bar);

		/* Ignore structures with reserved BAR values */
		if (bar > 0x5)
			continue;

		if (type == cfg_type) {
			if (pci_resource_len(dev, bar) &&
			    pci_resource_flags(dev, bar) & ioresource_types) {
				*bars |= (1 << bar);
				return pos;
			}
		}
	}
	return 0;
}

/* This is part of the ABI. Don't screw with it. */
static inline void check_offsets(void)
{
	/* Note: disk space was harmed in compilation of this function. */
	BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR !=
		     offsetof(struct virtio_pci_cap, cap_vndr));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT !=
		     offsetof(struct virtio_pci_cap, cap_next));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN !=
		     offsetof(struct virtio_pci_cap, cap_len));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE !=
		     offsetof(struct virtio_pci_cap, cfg_type));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR !=
		     offsetof(struct virtio_pci_cap, bar));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET !=
		     offsetof(struct virtio_pci_cap, offset));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH !=
		     offsetof(struct virtio_pci_cap, length));
	BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT !=
		     offsetof(struct virtio_pci_notify_cap,
			      notify_off_multiplier));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT !=
		     offsetof(struct virtio_pci_common_cfg,
			      device_feature_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF !=
		     offsetof(struct virtio_pci_common_cfg, device_feature));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT !=
		     offsetof(struct virtio_pci_common_cfg,
			      guest_feature_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF !=
		     offsetof(struct virtio_pci_common_cfg, guest_feature));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX !=
		     offsetof(struct virtio_pci_common_cfg, msix_config));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ !=
		     offsetof(struct virtio_pci_common_cfg, num_queues));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS !=
		     offsetof(struct virtio_pci_common_cfg, device_status));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION !=
		     offsetof(struct virtio_pci_common_cfg, config_generation));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT !=
		     offsetof(struct virtio_pci_common_cfg, queue_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE !=
		     offsetof(struct virtio_pci_common_cfg, queue_size));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX !=
		     offsetof(struct virtio_pci_common_cfg, queue_msix_vector));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE !=
		     offsetof(struct virtio_pci_common_cfg, queue_enable));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF !=
		     offsetof(struct virtio_pci_common_cfg, queue_notify_off));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_desc_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_desc_hi));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_avail_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_avail_hi));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_used_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_used_hi));
}

/* the PCI probing function */
int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
{
	struct pci_dev *pci_dev = vp_dev->pci_dev;
	int err, common, isr, notify, device;
	u32 notify_length;
	u32 notify_offset;

	check_offsets();

	/* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
	if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
		return -ENODEV;

	if (pci_dev->device < 0x1040) {
		/* Transitional devices: use the PCI subsystem device id as
		 * virtio device id, same as legacy driver always did.
		 */
		vp_dev->vdev.id.device = pci_dev->subsystem_device;
	} else {
		/* Modern devices: simply use PCI device id, but start from 0x1040. */
		vp_dev->vdev.id.device = pci_dev->device - 0x1040;
	}
	vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;

	/* check for a common config: if not, use legacy mode (bar 0). */
	common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &vp_dev->modern_bars);
	if (!common) {
		dev_info(&pci_dev->dev,
			 "virtio_pci: leaving for legacy driver\n");
		return -ENODEV;
	}

	/* If common is there, these should be too... */
	isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
					 IORESOURCE_IO | IORESOURCE_MEM,
					 &vp_dev->modern_bars);
	notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &vp_dev->modern_bars);
	if (!isr || !notify) {
		dev_err(&pci_dev->dev,
			"virtio_pci: missing capabilities %i/%i/%i\n",
			common, isr, notify);
		return -EINVAL;
	}

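	/*
	 * Prefer 64-bit DMA; fall back to 32-bit, and as a last resort
	 * carry on with whatever the platform default is.
	 */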
	err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pci_dev->dev,
						DMA_BIT_MASK(32));
	if (err)
		dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");

	/* Device capability is only mandatory for devices that have
	 * device-specific configuration.
	 */
	device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &vp_dev->modern_bars);

	err = pci_request_selected_regions(pci_dev, vp_dev->modern_bars,
					   "virtio-pci-modern");
	if (err)
		return err;

	err = -EINVAL;
	vp_dev->common = map_capability(pci_dev, common,
					sizeof(struct virtio_pci_common_cfg), 4,
					0, sizeof(struct virtio_pci_common_cfg),
					NULL);
	if (!vp_dev->common)
		goto err_map_common;
	vp_dev->isr = map_capability(pci_dev, isr, sizeof(u8), 1,
				     0, 1,
				     NULL);
	if (!vp_dev->isr)
		goto err_map_isr;

	/* Read notify_off_multiplier from config space. */
	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						notify_off_multiplier),
			      &vp_dev->notify_offset_multiplier);
	/* Read notify length and offset from config space. */
	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						cap.length),
			      &notify_length);

	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						cap.offset),
			      &notify_offset);

	/* We don't know ahead of time how many VQs we'll map.
	 * If notify length is small, map it all now.
	 * Otherwise, map each VQ individually later.
	 */
	if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
		vp_dev->notify_base = map_capability(pci_dev, notify, 2, 2,
						     0, notify_length,
						     &vp_dev->notify_len);
		if (!vp_dev->notify_base)
			goto err_map_notify;
	} else {
		vp_dev->notify_map_cap = notify;
	}

	/* Again, we don't know how much we should map, but PAGE_SIZE
	 * is more than enough for all existing devices.
	 */
	if (device) {
		vp_dev->device = map_capability(pci_dev, device, 0, 4,
						0, PAGE_SIZE,
						&vp_dev->device_len);
		if (!vp_dev->device)
			goto err_map_device;

		vp_dev->vdev.config = &virtio_pci_config_ops;
	} else {
		vp_dev->vdev.config = &virtio_pci_config_nodev_ops;
	}

	vp_dev->config_vector = vp_config_vector;
	vp_dev->setup_vq = setup_vq;
	vp_dev->del_vq = del_vq;

	return 0;

err_map_device:
	if (vp_dev->notify_base)
		pci_iounmap(pci_dev, vp_dev->notify_base);
err_map_notify:
	pci_iounmap(pci_dev, vp_dev->isr);
err_map_isr:
	pci_iounmap(pci_dev, vp_dev->common);
err_map_common:
	return err;
}

void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
{
	struct pci_dev *pci_dev = vp_dev->pci_dev;

	if (vp_dev->device)
		pci_iounmap(pci_dev, vp_dev->device);
	if (vp_dev->notify_base)
		pci_iounmap(pci_dev, vp_dev->notify_base);
	pci_iounmap(pci_dev, vp_dev->isr);
	pci_iounmap(pci_dev, vp_dev->common);
	pci_release_selected_regions(pci_dev, vp_dev->modern_bars);
}