Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

drivers/iommu/virtio-iommu.c (all lines from commit 8f3ce5b39, kx, 2023-10-28 12:00:06 +0300)
// SPDX-License-Identifier: GPL-2.0
/*
 * Virtio driver for the paravirtualized IOMMU
 *
 * Copyright (C) 2019 Arm Limited
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/amba/bus.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/freezer.h>
#include <linux/interval_tree.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>
#include <linux/wait.h>

#include <uapi/linux/virtio_iommu.h>

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

#define VIOMMU_REQUEST_VQ		0
#define VIOMMU_EVENT_VQ			1
#define VIOMMU_NR_VQS			2

struct viommu_dev {
	struct iommu_device		iommu;
	struct device			*dev;
	struct virtio_device		*vdev;

	struct ida			domain_ids;

	struct virtqueue		*vqs[VIOMMU_NR_VQS];
	spinlock_t			request_lock;
	struct list_head		requests;
	void				*evts;

	/* Device configuration */
	struct iommu_domain_geometry	geometry;
	u64				pgsize_bitmap;
	u32				first_domain;
	u32				last_domain;
	/* Supported MAP flags */
	u32				map_flags;
	u32				probe_size;
};

struct viommu_mapping {
	phys_addr_t			paddr;
	struct interval_tree_node	iova;
	u32				flags;
};

struct viommu_domain {
	struct iommu_domain		domain;
	struct viommu_dev		*viommu;
	struct mutex			mutex; /* protects viommu pointer */
	unsigned int			id;
	u32				map_flags;

	spinlock_t			mappings_lock;
	struct rb_root_cached		mappings;

	unsigned long			nr_endpoints;
};

struct viommu_endpoint {
	struct device			*dev;
	struct viommu_dev		*viommu;
	struct viommu_domain		*vdomain;
	struct list_head		resv_regions;
};

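/*
 * One request on the request virtqueue. @buf contains a copy of the caller's
 * request; the device-written tail starts at @write_offset and, when
 * @writeback is set, is copied back to the caller's buffer on completion.
 */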
struct viommu_request {
	struct list_head		list;
	void				*writeback;
	unsigned int			write_offset;
	unsigned int			len;
	char				buf[];
};

#define VIOMMU_FAULT_RESV_MASK		0xffffff00

struct viommu_event {
	union {
		u32			head;
		struct virtio_iommu_fault fault;
	};
};

#define to_viommu_domain(domain)	\
	container_of(domain, struct viommu_domain, domain)

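/* Translate the request status written by the device into an errno */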
static int viommu_get_req_errno(void *buf, size_t len)
{
	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);

	switch (tail->status) {
	case VIRTIO_IOMMU_S_OK:
		return 0;
	case VIRTIO_IOMMU_S_UNSUPP:
		return -ENOSYS;
	case VIRTIO_IOMMU_S_INVAL:
		return -EINVAL;
	case VIRTIO_IOMMU_S_RANGE:
		return -ERANGE;
	case VIRTIO_IOMMU_S_NOENT:
		return -ENOENT;
	case VIRTIO_IOMMU_S_FAULT:
		return -EFAULT;
	case VIRTIO_IOMMU_S_NOMEM:
		return -ENOMEM;
	case VIRTIO_IOMMU_S_IOERR:
	case VIRTIO_IOMMU_S_DEVERR:
	default:
		return -EIO;
	}
}

static void viommu_set_req_status(void *buf, size_t len, int status)
{
	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);

	tail->status = status;
}

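/*
 * Return the offset, within the request buffer, of the part written by the
 * device: the status tail, preceded by the probe output for a PROBE request.
 */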
static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,
					  struct virtio_iommu_req_head *req,
					  size_t len)
{
	size_t tail_size = sizeof(struct virtio_iommu_req_tail);

	if (req->type == VIRTIO_IOMMU_T_PROBE)
		return len - viommu->probe_size - tail_size;

	return len - tail_size;
}

/*
 * __viommu_sync_req - Complete all in-flight requests
 *
 * Wait for all added requests to complete. When this function returns, all
 * requests that were in-flight at the time of the call have completed.
 */
static int __viommu_sync_req(struct viommu_dev *viommu)
{
	unsigned int len;
	size_t write_len;
	struct viommu_request *req;
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	virtqueue_kick(vq);

	while (!list_empty(&viommu->requests)) {
		len = 0;
		req = virtqueue_get_buf(vq, &len);
		if (!req)
			continue;

		if (!len)
			viommu_set_req_status(req->buf, req->len,
					      VIRTIO_IOMMU_S_IOERR);

		write_len = req->len - req->write_offset;
		if (req->writeback && len == write_len)
			memcpy(req->writeback, req->buf + req->write_offset,
			       write_len);

		list_del(&req->list);
		kfree(req);
	}

	return 0;
}

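/* Lock wrapper around __viommu_sync_req() */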
static int viommu_sync_req(struct viommu_dev *viommu)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);
	ret = __viommu_sync_req(viommu);
	if (ret)
		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}

/*
 * __viommu_add_req - Add one request to the queue
 * @buf: pointer to the request buffer
 * @len: length of the request buffer
 * @writeback: copy data back to the buffer when the request completes.
 *
 * Add a request to the queue. Only synchronize the queue if it's already full.
 * Otherwise don't kick the queue or wait for requests to complete.
 *
 * When @writeback is true, data written by the device, including the request
 * status, is copied into @buf after the request completes. This is unsafe if
 * the caller allocates @buf on stack and drops the lock between add_req() and
 * sync_req().
 *
 * Return 0 if the request was successfully added to the queue.
 */
static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len,
			    bool writeback)
{
	int ret;
	off_t write_offset;
	struct viommu_request *req;
	struct scatterlist top_sg, bottom_sg;
	struct scatterlist *sg[2] = { &top_sg, &bottom_sg };
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	write_offset = viommu_get_write_desc_offset(viommu, buf, len);
	if (write_offset <= 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req) + len, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->len = len;
	if (writeback) {
		req->writeback = buf + write_offset;
		req->write_offset = write_offset;
	}
	memcpy(&req->buf, buf, write_offset);

	sg_init_one(&top_sg, req->buf, write_offset);
	sg_init_one(&bottom_sg, req->buf + write_offset, len - write_offset);

	ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		/* If the queue is full, sync and retry */
		if (!__viommu_sync_req(viommu))
			ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	}
	if (ret)
		goto err_free;

	list_add_tail(&req->list, &viommu->requests);
	return 0;

err_free:
	kfree(req);
	return ret;
}

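/* Add a request to the queue without waiting for it to complete */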
static int viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);
	ret = __viommu_add_req(viommu, buf, len, false);
	if (ret)
		dev_dbg(viommu->dev, "could not add request: %d\n", ret);
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}

/*
 * Send a request and wait for it to complete. Return the request status (as an
 * errno)
 */
static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf,
				size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);

	ret = __viommu_add_req(viommu, buf, len, true);
	if (ret) {
		dev_dbg(viommu->dev, "could not add request (%d)\n", ret);
		goto out_unlock;
	}

	ret = __viommu_sync_req(viommu);
	if (ret) {
		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
		/* Fall-through (get the actual request status) */
	}

	ret = viommu_get_req_errno(buf, len);
out_unlock:
	spin_unlock_irqrestore(&viommu->request_lock, flags);
	return ret;
}

/*
 * viommu_add_mapping - add a mapping to the internal tree
 *
 * On success, return 0. Otherwise return -ENOMEM.
 */
static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
			      phys_addr_t paddr, size_t size, u32 flags)
{
	unsigned long irqflags;
	struct viommu_mapping *mapping;

	mapping = kzalloc(sizeof(*mapping), GFP_ATOMIC);
	if (!mapping)
		return -ENOMEM;

	mapping->paddr		= paddr;
	mapping->iova.start	= iova;
	mapping->iova.last	= iova + size - 1;
	mapping->flags		= flags;

	spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
	interval_tree_insert(&mapping->iova, &vdomain->mappings);
	spin_unlock_irqrestore(&vdomain->mappings_lock, irqflags);

	return 0;
}

/*
 * viommu_del_mappings - remove mappings from the internal tree
 *
 * @vdomain: the domain
 * @iova: start of the range
 * @size: size of the range. A size of 0 corresponds to the entire address
 *	space.
 *
 * On success, returns the number of unmapped bytes (>= size)
 */
static size_t viommu_del_mappings(struct viommu_domain *vdomain,
				  unsigned long iova, size_t size)
{
	size_t unmapped = 0;
	unsigned long flags;
	unsigned long last = iova + size - 1;
	struct viommu_mapping *mapping = NULL;
	struct interval_tree_node *node, *next;

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	next = interval_tree_iter_first(&vdomain->mappings, iova, last);
	while (next) {
		node = next;
		mapping = container_of(node, struct viommu_mapping, iova);
		next = interval_tree_iter_next(node, iova, last);

		/* Trying to split a mapping? */
		if (mapping->iova.start < iova)
			break;

		/*
		 * Virtio-iommu doesn't allow UNMAP to split a mapping created
		 * with a single MAP request, so remove the full mapping.
		 */
		unmapped += mapping->iova.last - mapping->iova.start + 1;

		interval_tree_remove(node, &vdomain->mappings);
		kfree(mapping);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return unmapped;
}

/*
 * viommu_replay_mappings - re-send MAP requests
 *
 * When reattaching a domain that was previously detached from all endpoints,
 * mappings were deleted from the device. Re-create the mappings available in
 * the internal tree.
 */
static int viommu_replay_mappings(struct viommu_domain *vdomain)
{
	int ret = 0;
	unsigned long flags;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node;
	struct virtio_iommu_req_map map;

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL);
	while (node) {
		mapping = container_of(node, struct viommu_mapping, iova);
		map = (struct virtio_iommu_req_map) {
			.head.type	= VIRTIO_IOMMU_T_MAP,
			.domain		= cpu_to_le32(vdomain->id),
			.virt_start	= cpu_to_le64(mapping->iova.start),
			.virt_end	= cpu_to_le64(mapping->iova.last),
			.phys_start	= cpu_to_le64(mapping->paddr),
			.flags		= cpu_to_le32(mapping->flags),
		};

		ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
		if (ret)
			break;

		node = interval_tree_iter_next(node, 0, -1UL);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return ret;
}

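/*
 * Parse one RESV_MEM probe property and add the region to the endpoint's
 * resv_regions list.
 */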
static int viommu_add_resv_mem(struct viommu_endpoint *vdev,
			       struct virtio_iommu_probe_resv_mem *mem,
			       size_t len)
{
	size_t size;
	u64 start64, end64;
	phys_addr_t start, end;
	struct iommu_resv_region *region = NULL;
	unsigned long prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	start = start64 = le64_to_cpu(mem->start);
	end = end64 = le64_to_cpu(mem->end);
	size = end64 - start64 + 1;

	/* Catch any overflow, including the unlikely end64 - start64 + 1 = 0 */
	if (start != start64 || end != end64 || size < end64 - start64)
		return -EOVERFLOW;

	if (len < sizeof(*mem))
		return -EINVAL;

	switch (mem->subtype) {
	default:
		dev_warn(vdev->dev, "unknown resv mem subtype 0x%x\n",
			 mem->subtype);
		fallthrough;
	case VIRTIO_IOMMU_RESV_MEM_T_RESERVED:
		region = iommu_alloc_resv_region(start, size, 0,
						 IOMMU_RESV_RESERVED);
		break;
	case VIRTIO_IOMMU_RESV_MEM_T_MSI:
		region = iommu_alloc_resv_region(start, size, prot,
						 IOMMU_RESV_MSI);
		break;
	}
	if (!region)
		return -ENOMEM;

	list_add(&region->list, &vdev->resv_regions);
	return 0;
}

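/*
 * Send a PROBE request for this endpoint and parse the properties returned
 * by the device (currently only reserved memory regions).
 */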
static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
{
	int ret;
	u16 type, len;
	size_t cur = 0;
	size_t probe_len;
	struct virtio_iommu_req_probe *probe;
	struct virtio_iommu_probe_property *prop;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);

	if (!fwspec->num_ids)
		return -EINVAL;

	probe_len = sizeof(*probe) + viommu->probe_size +
		    sizeof(struct virtio_iommu_req_tail);
	probe = kzalloc(probe_len, GFP_KERNEL);
	if (!probe)
		return -ENOMEM;

	probe->head.type = VIRTIO_IOMMU_T_PROBE;
	/*
	 * For now, assume that properties of an endpoint that outputs multiple
	 * IDs are consistent. Only probe the first one.
	 */
	probe->endpoint = cpu_to_le32(fwspec->ids[0]);

	ret = viommu_send_req_sync(viommu, probe, probe_len);
	if (ret)
		goto out_free;

	prop = (void *)probe->properties;
	type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;

	while (type != VIRTIO_IOMMU_PROBE_T_NONE &&
	       cur < viommu->probe_size) {
		len = le16_to_cpu(prop->length) + sizeof(*prop);

		switch (type) {
		case VIRTIO_IOMMU_PROBE_T_RESV_MEM:
			ret = viommu_add_resv_mem(vdev, (void *)prop, len);
			break;
		default:
			dev_err(dev, "unknown viommu prop 0x%x\n", type);
		}

		if (ret)
			dev_err(dev, "failed to parse viommu prop 0x%x\n", type);

		cur += len;
		if (cur >= viommu->probe_size)
			break;

		prop = (void *)probe->properties + cur;
		type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;
	}

out_free:
	kfree(probe);
	return ret;
}

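/* Log a fault event reported by the device on the event virtqueue */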
static int viommu_fault_handler(struct viommu_dev *viommu,
				struct virtio_iommu_fault *fault)
{
	char *reason_str;

	u8 reason	= fault->reason;
	u32 flags	= le32_to_cpu(fault->flags);
	u32 endpoint	= le32_to_cpu(fault->endpoint);
	u64 address	= le64_to_cpu(fault->address);

	switch (reason) {
	case VIRTIO_IOMMU_FAULT_R_DOMAIN:
		reason_str = "domain";
		break;
	case VIRTIO_IOMMU_FAULT_R_MAPPING:
		reason_str = "page";
		break;
	case VIRTIO_IOMMU_FAULT_R_UNKNOWN:
	default:
		reason_str = "unknown";
		break;
	}

	/* TODO: find EP by ID and report_iommu_fault */
	if (flags & VIRTIO_IOMMU_FAULT_F_ADDRESS)
		dev_err_ratelimited(viommu->dev, "%s fault from EP %u at %#llx [%s%s%s]\n",
				    reason_str, endpoint, address,
				    flags & VIRTIO_IOMMU_FAULT_F_READ ? "R" : "",
				    flags & VIRTIO_IOMMU_FAULT_F_WRITE ? "W" : "",
				    flags & VIRTIO_IOMMU_FAULT_F_EXEC ? "X" : "");
	else
		dev_err_ratelimited(viommu->dev, "%s fault from EP %u\n",
				    reason_str, endpoint);
	return 0;
}

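/*
 * Event virtqueue callback: handle each returned event buffer, then hand it
 * back to the device.
 */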
static void viommu_event_handler(struct virtqueue *vq)
{
	int ret;
	unsigned int len;
	struct scatterlist sg[1];
	struct viommu_event *evt;
	struct viommu_dev *viommu = vq->vdev->priv;

	while ((evt = virtqueue_get_buf(vq, &len)) != NULL) {
		if (len > sizeof(*evt)) {
			dev_err(viommu->dev,
				"invalid event buffer (len %u != %zu)\n",
				len, sizeof(*evt));
		} else if (!(evt->head & VIOMMU_FAULT_RESV_MASK)) {
			viommu_fault_handler(viommu, &evt->fault);
		}

		sg_init_one(sg, evt, sizeof(*evt));
		ret = virtqueue_add_inbuf(vq, sg, 1, evt, GFP_ATOMIC);
		if (ret)
			dev_err(viommu->dev, "could not add event buffer\n");
	}

	virtqueue_kick(vq);
}

/* IOMMU API */

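/* Allocate a domain; it is only bound to a viommu, and given an ID, on first attach */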
static struct iommu_domain *viommu_domain_alloc(unsigned type)
{
	struct viommu_domain *vdomain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
	if (!vdomain)
		return NULL;

	mutex_init(&vdomain->mutex);
	spin_lock_init(&vdomain->mappings_lock);
	vdomain->mappings = RB_ROOT_CACHED;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&vdomain->domain)) {
		kfree(vdomain);
		return NULL;
	}

	return &vdomain->domain;
}

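/*
 * Bind the domain to its viommu: allocate a domain ID and inherit the
 * device's page-size bitmap, geometry and MAP flags.
 */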
static int viommu_domain_finalise(struct viommu_endpoint *vdev,
				  struct iommu_domain *domain)
{
	int ret;
	unsigned long viommu_page_size;
	struct viommu_dev *viommu = vdev->viommu;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap);
	if (viommu_page_size > PAGE_SIZE) {
		dev_err(vdev->dev,
			"granule 0x%lx larger than system page size 0x%lx\n",
			viommu_page_size, PAGE_SIZE);
		return -EINVAL;
	}

	ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
			      viommu->last_domain, GFP_KERNEL);
	if (ret < 0)
		return ret;

	vdomain->id		= (unsigned int)ret;

	domain->pgsize_bitmap	= viommu->pgsize_bitmap;
	domain->geometry	= viommu->geometry;

	vdomain->map_flags	= viommu->map_flags;
	vdomain->viommu		= viommu;

	return 0;
}

static void viommu_domain_free(struct iommu_domain *domain)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	iommu_put_dma_cookie(domain);

	/* Free all remaining mappings (size 2^64) */
	viommu_del_mappings(vdomain, 0, 0);

	if (vdomain->viommu)
		ida_free(&vdomain->viommu->domain_ids, vdomain->id);

	kfree(vdomain);
}

static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int i;
	int ret = 0;
	struct virtio_iommu_req_attach req;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	mutex_lock(&vdomain->mutex);
	if (!vdomain->viommu) {
		/*
		 * Properly initialize the domain now that we know which viommu
		 * owns it.
		 */
		ret = viommu_domain_finalise(vdev, domain);
	} else if (vdomain->viommu != vdev->viommu) {
		dev_err(dev, "cannot attach to foreign vIOMMU\n");
		ret = -EXDEV;
	}
	mutex_unlock(&vdomain->mutex);

	if (ret)
		return ret;

	/*
	 * In the virtio-iommu device, when attaching the endpoint to a new
	 * domain, it is detached from the old one and, if as a result the
	 * old domain isn't attached to any endpoint, all mappings are removed
	 * from the old domain and it is freed.
	 *
	 * In the driver the old domain still exists, and its mappings will be
	 * recreated if it gets reattached to an endpoint. Otherwise it will be
	 * freed explicitly.
	 *
	 * vdev->vdomain is protected by group->mutex
	 */
	if (vdev->vdomain)
		vdev->vdomain->nr_endpoints--;

	req = (struct virtio_iommu_req_attach) {
		.head.type	= VIRTIO_IOMMU_T_ATTACH,
		.domain		= cpu_to_le32(vdomain->id),
	};

	for (i = 0; i < fwspec->num_ids; i++) {
		req.endpoint = cpu_to_le32(fwspec->ids[i]);

		ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
		if (ret)
			return ret;
	}

	if (!vdomain->nr_endpoints) {
		/*
		 * This endpoint is the first to be attached to the domain.
		 * Replay existing mappings (e.g. SW MSI).
		 */
		ret = viommu_replay_mappings(vdomain);
		if (ret)
			return ret;
	}

	vdomain->nr_endpoints++;
	vdev->vdomain = vdomain;

	return 0;
}

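/*
 * Record the mapping in the internal tree, then send a MAP request to the
 * device if the domain is attached to at least one endpoint.
 */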
static int viommu_map(struct iommu_domain *domain, unsigned long iova,
		      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	int ret;
	u32 flags;
	struct virtio_iommu_req_map map;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	flags = (prot & IOMMU_READ ? VIRTIO_IOMMU_MAP_F_READ : 0) |
		(prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
		(prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0);

	if (flags & ~vdomain->map_flags)
		return -EINVAL;

	ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
	if (ret)
		return ret;

	map = (struct virtio_iommu_req_map) {
		.head.type	= VIRTIO_IOMMU_T_MAP,
		.domain		= cpu_to_le32(vdomain->id),
		.virt_start	= cpu_to_le64(iova),
		.phys_start	= cpu_to_le64(paddr),
		.virt_end	= cpu_to_le64(iova + size - 1),
		.flags		= cpu_to_le32(flags),
	};

	if (!vdomain->nr_endpoints)
		return 0;

	ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
	if (ret)
		viommu_del_mappings(vdomain, iova, size);

	return ret;
}

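/*
 * Remove the mappings from the internal tree and queue an UNMAP request; the
 * request only goes out to the device when the caller syncs the IOTLB.
 */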
static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
			   size_t size, struct iommu_iotlb_gather *gather)
{
	int ret = 0;
	size_t unmapped;
	struct virtio_iommu_req_unmap unmap;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	unmapped = viommu_del_mappings(vdomain, iova, size);
	if (unmapped < size)
		return 0;

	/* Device already removed all mappings after detach. */
	if (!vdomain->nr_endpoints)
		return unmapped;

	unmap = (struct virtio_iommu_req_unmap) {
		.head.type	= VIRTIO_IOMMU_T_UNMAP,
		.domain		= cpu_to_le32(vdomain->id),
		.virt_start	= cpu_to_le64(iova),
		.virt_end	= cpu_to_le64(iova + unmapped - 1),
	};

	ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap));
	return ret ? 0 : unmapped;
}

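/*
 * Translate an IOVA to a physical address by looking up the covering
 * mapping in the domain's interval tree. Returns 0 when nothing is mapped
 * at @iova.
 */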
static phys_addr_t viommu_iova_to_phys(struct iommu_domain *domain,
				       dma_addr_t iova)
{
	u64 paddr = 0;
	unsigned long flags;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	node = interval_tree_iter_first(&vdomain->mappings, iova, iova);
	if (node) {
		mapping = container_of(node, struct viommu_mapping, iova);
		paddr = mapping->paddr + (iova - mapping->iova.start);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return paddr;
}

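/* Flush any deferred requests, such as the UNMAPs queued by viommu_unmap(). */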
static void viommu_iotlb_sync(struct iommu_domain *domain,
			      struct iommu_iotlb_gather *gather)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	viommu_sync_req(vdomain->viommu);
}

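/*
 * Report this endpoint's reserved regions to the IOMMU core. Regions
 * described by the device are duplicated onto @head; if none of them is an
 * MSI window, a software-mapped MSI region is added so that MSI doorbell
 * writes can still be translated.
 */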
static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_resv_region *entry, *new_entry, *msi = NULL;
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	list_for_each_entry(entry, &vdev->resv_regions, list) {
		if (entry->type == IOMMU_RESV_MSI)
			msi = entry;

		new_entry = kmemdup(entry, sizeof(*entry), GFP_KERNEL);
		if (!new_entry)
			return;
		list_add_tail(&new_entry->list, head);
	}

	/*
	 * If the device didn't register any bypass MSI window, add a
	 * software-mapped region.
	 */
	if (!msi) {
		msi = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
					      prot, IOMMU_RESV_SW_MSI);
		if (!msi)
			return;

		list_add_tail(&msi->list, head);
	}

	iommu_dma_get_resv_regions(dev, head);
}

static struct iommu_ops viommu_ops;
static struct virtio_driver virtio_iommu_drv;

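/*
 * Endpoints reference their IOMMU by firmware node. The node described in
 * the firmware belongs to the virtio transport, which is the parent of the
 * virtio device, hence the dev->parent comparison below.
 */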
static int viommu_match_node(struct device *dev, const void *data)
{
	return dev->parent->fwnode == data;
}

static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device(&virtio_iommu_drv.driver, NULL,
						fwnode, viommu_match_node);
	put_device(dev);

	return dev ? dev_to_virtio(dev)->priv : NULL;
}

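/*
 * Set up per-endpoint state for a device that references this IOMMU in its
 * firmware description. When the device offers VIRTIO_IOMMU_F_PROBE
 * (probe_size is non-zero), also query its endpoint-specific properties,
 * such as reserved regions.
 */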
static struct iommu_device *viommu_probe_device(struct device *dev)
{
	int ret;
	struct viommu_endpoint *vdev;
	struct viommu_dev *viommu = NULL;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (!fwspec || fwspec->ops != &viommu_ops)
		return ERR_PTR(-ENODEV);

	viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode);
	if (!viommu)
		return ERR_PTR(-ENODEV);

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev)
		return ERR_PTR(-ENOMEM);

	vdev->dev = dev;
	vdev->viommu = viommu;
	INIT_LIST_HEAD(&vdev->resv_regions);
	dev_iommu_priv_set(dev, vdev);

	if (viommu->probe_size) {
		/* Get additional information for this endpoint */
		ret = viommu_probe_endpoint(viommu, dev);
		if (ret)
			goto err_free_dev;
	}

	return &viommu->iommu;

err_free_dev:
	generic_iommu_put_resv_regions(dev, &vdev->resv_regions);
	kfree(vdev);

	return ERR_PTR(ret);
}

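/* Undo viommu_probe_device(): release the reserved regions and the endpoint. */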
static void viommu_release_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev;

	if (!fwspec || fwspec->ops != &viommu_ops)
		return;

	vdev = dev_iommu_priv_get(dev);

	generic_iommu_put_resv_regions(dev, &vdev->resv_regions);
	kfree(vdev);
}

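/* PCI endpoints may alias one another; let the PCI core compute the group. */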
static struct iommu_group *viommu_device_group(struct device *dev)
{
	if (dev_is_pci(dev))
		return pci_device_group(dev);
	else
		return generic_device_group(dev);
}

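/* Each phandle argument in the firmware description is one endpoint ID. */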
static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	return iommu_fwspec_add_ids(dev, args->args, 1);
}

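/*
 * viommu_ops is not const: pgsize_bitmap is only known at probe time, when
 * it is read from the device configuration.
 */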
static struct iommu_ops viommu_ops = {
	.domain_alloc		= viommu_domain_alloc,
	.domain_free		= viommu_domain_free,
	.attach_dev		= viommu_attach_dev,
	.map			= viommu_map,
	.unmap			= viommu_unmap,
	.iova_to_phys		= viommu_iova_to_phys,
	.iotlb_sync		= viommu_iotlb_sync,
	.probe_device		= viommu_probe_device,
	.release_device		= viommu_release_device,
	.device_group		= viommu_device_group,
	.get_resv_regions	= viommu_get_resv_regions,
	.put_resv_regions	= generic_iommu_put_resv_regions,
	.of_xlate		= viommu_of_xlate,
};

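/*
 * Allocate the two virtqueues. Requests are always sent synchronously, so
 * only the event queue gets a callback.
 */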
static int viommu_init_vqs(struct viommu_dev *viommu)
{
	struct virtio_device *vdev = dev_to_virtio(viommu->dev);
	const char *names[] = { "request", "event" };
	vq_callback_t *callbacks[] = {
		NULL, /* No async requests */
		viommu_event_handler,
	};

	return virtio_find_vqs(vdev, VIOMMU_NR_VQS, viommu->vqs, callbacks,
			       names, NULL);
}

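/*
 * Pre-fill the event queue with receive buffers, one per free descriptor.
 * The array lives for the device's lifetime (devm allocation); the event
 * handler hands each buffer back to the queue after consuming it.
 */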
static int viommu_fill_evtq(struct viommu_dev *viommu)
{
	int i, ret;
	struct scatterlist sg[1];
	struct viommu_event *evts;
	struct virtqueue *vq = viommu->vqs[VIOMMU_EVENT_VQ];
	size_t nr_evts = vq->num_free;

	viommu->evts = evts = devm_kmalloc_array(viommu->dev, nr_evts,
						 sizeof(*evts), GFP_KERNEL);
	if (!evts)
		return -ENOMEM;

	for (i = 0; i < nr_evts; i++) {
		sg_init_one(sg, &evts[i], sizeof(*evts));
		ret = virtqueue_add_inbuf(vq, sg, 1, &evts[i], GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}

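/*
 * Device probe: check the mandatory feature bits, read the configuration
 * space (page size mask, input and domain ranges, probe buffer size),
 * register with the IOMMU core, and claim the bus types this IOMMU can
 * serve.
 */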
static int viommu_probe(struct virtio_device *vdev)
{
	struct device *parent_dev = vdev->dev.parent;
	struct viommu_dev *viommu = NULL;
	struct device *dev = &vdev->dev;
	u64 input_start = 0;
	u64 input_end = -1UL;
	int ret;

	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
	    !virtio_has_feature(vdev, VIRTIO_IOMMU_F_MAP_UNMAP))
		return -ENODEV;

	viommu = devm_kzalloc(dev, sizeof(*viommu), GFP_KERNEL);
	if (!viommu)
		return -ENOMEM;

	spin_lock_init(&viommu->request_lock);
	ida_init(&viommu->domain_ids);
	viommu->dev = dev;
	viommu->vdev = vdev;
	INIT_LIST_HEAD(&viommu->requests);

	ret = viommu_init_vqs(viommu);
	if (ret)
		return ret;

	virtio_cread_le(vdev, struct virtio_iommu_config, page_size_mask,
			&viommu->pgsize_bitmap);

	if (!viommu->pgsize_bitmap) {
		ret = -EINVAL;
		goto err_free_vqs;
	}

	viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
	viommu->last_domain = ~0U;

	/* Optional features */
	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
				struct virtio_iommu_config, input_range.start,
				&input_start);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
				struct virtio_iommu_config, input_range.end,
				&input_end);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
				struct virtio_iommu_config, domain_range.start,
				&viommu->first_domain);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
				struct virtio_iommu_config, domain_range.end,
				&viommu->last_domain);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_PROBE,
				struct virtio_iommu_config, probe_size,
				&viommu->probe_size);

	viommu->geometry = (struct iommu_domain_geometry) {
		.aperture_start	= input_start,
		.aperture_end	= input_end,
		.force_aperture	= true,
	};

	if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO))
		viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;

	viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;

	virtio_device_ready(vdev);

	/* Populate the event queue with buffers */
	ret = viommu_fill_evtq(viommu);
	if (ret)
		goto err_free_vqs;

	ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s",
				     virtio_bus_name(vdev));
	if (ret)
		goto err_free_vqs;

	iommu_device_set_ops(&viommu->iommu, &viommu_ops);
	iommu_device_set_fwnode(&viommu->iommu, parent_dev->fwnode);

	iommu_device_register(&viommu->iommu);

	/* Set ourselves as the IOMMU for the bus types we may translate */
#ifdef CONFIG_PCI
	if (pci_bus_type.iommu_ops != &viommu_ops) {
		ret = bus_set_iommu(&pci_bus_type, &viommu_ops);
		if (ret)
			goto err_unregister;
	}
#endif
#ifdef CONFIG_ARM_AMBA
	if (amba_bustype.iommu_ops != &viommu_ops) {
		ret = bus_set_iommu(&amba_bustype, &viommu_ops);
		if (ret)
			goto err_unregister;
	}
#endif
	if (platform_bus_type.iommu_ops != &viommu_ops) {
		ret = bus_set_iommu(&platform_bus_type, &viommu_ops);
		if (ret)
			goto err_unregister;
	}

	vdev->priv = viommu;

	dev_info(dev, "input address: %u bits\n",
		 order_base_2(viommu->geometry.aperture_end));
	dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap);

	return 0;

err_unregister:
	iommu_device_sysfs_remove(&viommu->iommu);
	iommu_device_unregister(&viommu->iommu);
err_free_vqs:
	vdev->config->del_vqs(vdev);

	return ret;
}

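/* Tear down in reverse: unregister from the IOMMU core, then reset the device. */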
static void viommu_remove(struct virtio_device *vdev)
{
	struct viommu_dev *viommu = vdev->priv;

	iommu_device_sysfs_remove(&viommu->iommu);
	iommu_device_unregister(&viommu->iommu);

	/* Stop all virtqueues */
	vdev->config->reset(vdev);
	vdev->config->del_vqs(vdev);

	dev_info(&vdev->dev, "device removed\n");
}

static void viommu_config_changed(struct virtio_device *vdev)
{
	dev_warn(&vdev->dev, "config changed\n");
}

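/* Feature bits this driver can negotiate, on top of VIRTIO_F_VERSION_1. */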
static unsigned int features[] = {
	VIRTIO_IOMMU_F_MAP_UNMAP,
	VIRTIO_IOMMU_F_INPUT_RANGE,
	VIRTIO_IOMMU_F_DOMAIN_RANGE,
	VIRTIO_IOMMU_F_PROBE,
	VIRTIO_IOMMU_F_MMIO,
};

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_IOMMU, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
MODULE_DEVICE_TABLE(virtio, id_table);

static struct virtio_driver virtio_iommu_drv = {
	.driver.name		= KBUILD_MODNAME,
	.driver.owner		= THIS_MODULE,
	.id_table		= id_table,
	.feature_table		= features,
	.feature_table_size	= ARRAY_SIZE(features),
	.probe			= viommu_probe,
	.remove			= viommu_remove,
	.config_changed		= viommu_config_changed,
};

module_virtio_driver(virtio_iommu_drv);

MODULE_DESCRIPTION("Virtio IOMMU driver");
MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
MODULE_LICENSE("GPL v2");