Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Virtio PCI driver - legacy device support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * This module allows virtio devices to be used over a virtual PCI device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * This can be used with QEMU based VMMs like KVM or Xen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * Copyright IBM Corp. 2007
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  * Copyright Red Hat, Inc. 2014
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  * Authors:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  *  Anthony Liguori  <aliguori@us.ibm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  *  Rusty Russell <rusty@rustcorp.com.au>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  *  Michael S. Tsirkin <mst@redhat.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include "virtio_pci_common.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) /* virtio config->get_features() implementation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) static u64 vp_get_features(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) 	/* When someone needs more than 32 feature bits, we'll need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) 	 * steal a bit to indicate that the rest are somewhere else. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) 	return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) /* virtio config->finalize_features() implementation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) static int vp_finalize_features(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 	/* Give virtio_ring a chance to accept features. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 	vring_transport_features(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 	/* Make sure we don't have any features > 32 bits! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 	BUG_ON((u32)vdev->features != vdev->features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 	/* We only support 32 feature bits. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 	iowrite32(vdev->features, vp_dev->ioaddr + VIRTIO_PCI_GUEST_FEATURES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) /* virtio config->get() implementation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) static void vp_get(struct virtio_device *vdev, unsigned offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 		   void *buf, unsigned len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 	void __iomem *ioaddr = vp_dev->ioaddr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 			VIRTIO_PCI_CONFIG_OFF(vp_dev->msix_enabled) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 			offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 	u8 *ptr = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 	for (i = 0; i < len; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 		ptr[i] = ioread8(ioaddr + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) /* the config->set() implementation.  it's symmetric to the config->get()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62)  * implementation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) static void vp_set(struct virtio_device *vdev, unsigned offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 		   const void *buf, unsigned len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 	void __iomem *ioaddr = vp_dev->ioaddr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 			VIRTIO_PCI_CONFIG_OFF(vp_dev->msix_enabled) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 			offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 	const u8 *ptr = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 	for (i = 0; i < len; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 		iowrite8(ptr[i], ioaddr + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) /* config->{get,set}_status() implementations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) static u8 vp_get_status(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 	return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) static void vp_set_status(struct virtio_device *vdev, u8 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 	/* We should never be setting status to 0. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 	BUG_ON(status == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 	iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 
/* Reset the device by writing 0 to the status register, then make sure
 * the reset has fully taken effect before returning. */
static void vp_reset(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	/* 0 status means a reset. */
	iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
	/* Flush out the status write, and flush in device writes,
	 * including MSI-X interrupts, if any. */
	ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
	/* Flush pending VQ/configuration callbacks. */
	vp_synchronize_vectors(vdev);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 	/* Setup the vector used for configuration events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 	iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 	/* Verify we had enough resources to assign the vector */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 	/* Will also flush the write out to device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 	return ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 				  struct virtio_pci_vq_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 				  unsigned index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 				  void (*callback)(struct virtqueue *vq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 				  const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 				  bool ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 				  u16 msix_vec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 	struct virtqueue *vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 	u16 num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 	u64 q_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 	/* Select the queue we're interested in */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 	iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 	/* Check if queue is either not available or already active. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 	num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 	if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 		return ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 	info->msix_vector = msix_vec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 	/* create the vring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 	vq = vring_create_virtqueue(index, num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 				    VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 				    true, false, ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 				    vp_notify, callback, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 	if (!vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 	q_pfn = virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 	if (q_pfn >> 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 		dev_err(&vp_dev->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 			"platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 			0x1ULL << (32 + PAGE_SHIFT - 30));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 		err = -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 		goto out_del_vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 	/* activate the queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 	iowrite32(q_pfn, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 	vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 	if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 		iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 		msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 		if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 			err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 			goto out_deactivate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 	return vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) out_deactivate:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 	iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) out_del_vq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 	vring_del_virtqueue(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 	return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 
/* Tear down one virtqueue: detach its MSI-X vector (if any), deactivate
 * it on the device, then free the vring. */
static void del_vq(struct virtio_pci_vq_info *info)
{
	struct virtqueue *vq = info->vq;
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);

	/* Select the queue before touching its per-queue registers. */
	iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);

	if (vp_dev->msix_enabled) {
		/* Unbind the queue from its MSI-X vector. */
		iowrite16(VIRTIO_MSI_NO_VECTOR,
			  vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
		/* Flush the write out to device */
		ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
	}

	/* Select and deactivate the queue */
	iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);

	vring_del_virtqueue(vq);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 
/* Configuration ops for the legacy virtio-PCI transport, installed into
 * vp_dev->vdev.config by virtio_pci_legacy_probe(). */
static const struct virtio_config_ops virtio_pci_config_ops = {
	.get		= vp_get,
	.set		= vp_set,
	.get_status	= vp_get_status,
	.set_status	= vp_set_status,
	.reset		= vp_reset,
	.find_vqs	= vp_find_vqs,
	.del_vqs	= vp_del_vqs,
	.get_features	= vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name	= vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
	.get_vq_affinity = vp_get_vq_affinity,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 
/* the PCI probing function.
 *
 * Validates that the PCI device is a legacy virtio device, configures DMA
 * masks, maps BAR 0 (the legacy I/O region), and wires up the legacy
 * transport callbacks on @vp_dev.  Returns 0 on success or a negative
 * errno; on failure no resources are left held.
 */
int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
{
	struct pci_dev *pci_dev = vp_dev->pci_dev;
	int rc;

	/* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
	if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)
		return -ENODEV;

	if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) {
		printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n",
		       VIRTIO_PCI_ABI_VERSION, pci_dev->revision);
		return -ENODEV;
	}

	/* Prefer a 64-bit streaming DMA mask; fall back to 32-bit if the
	 * platform cannot do 64-bit. */
	rc = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64));
	if (rc) {
		rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
	} else {
		/*
		 * The virtio ring base address is expressed as a 32-bit PFN,
		 * with a page size of 1 << VIRTIO_PCI_QUEUE_ADDR_SHIFT.
		 */
		dma_set_coherent_mask(&pci_dev->dev,
				DMA_BIT_MASK(32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT));
	}

	/* Neither mask could be set: warn but keep going best-effort. */
	if (rc)
		dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA.  Trying to continue, but this might not work.\n");

	rc = pci_request_region(pci_dev, 0, "virtio-pci-legacy");
	if (rc)
		return rc;

	rc = -ENOMEM;
	/* Map all of BAR 0 (length 0 = whole BAR). */
	vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0);
	if (!vp_dev->ioaddr)
		goto err_iomap;

	vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR;

	/* we use the subsystem vendor/device id as the virtio vendor/device
	 * id.  this allows us to use the same PCI vendor/device id for all
	 * virtio devices and to identify the particular virtio driver by
	 * the subsystem ids */
	vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
	vp_dev->vdev.id.device = pci_dev->subsystem_device;

	vp_dev->vdev.config = &virtio_pci_config_ops;

	/* Legacy transport callbacks used by the common virtio-PCI core. */
	vp_dev->config_vector = vp_config_vector;
	vp_dev->setup_vq = setup_vq;
	vp_dev->del_vq = del_vq;

	return 0;

err_iomap:
	pci_release_region(pci_dev, 0);
	return rc;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 
/* Undo virtio_pci_legacy_probe(): unmap BAR 0, then release the region. */
void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
{
	struct pci_dev *pci_dev = vp_dev->pci_dev;

	pci_iounmap(pci_dev, vp_dev->ioaddr);
	pci_release_region(pci_dev, 0);
}