// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <trace/hooks/iommu.h>

struct iommu_dma_msi_page {
        struct list_head list;
        dma_addr_t iova;
        phys_addr_t phys;
};

enum iommu_dma_cookie_type {
        IOMMU_DMA_IOVA_COOKIE,
        IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
        enum iommu_dma_cookie_type type;
        union {
                /* Full allocator for IOMMU_DMA_IOVA_COOKIE */
                struct iova_domain iovad;
                /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
                dma_addr_t msi_iova;
        };
        struct list_head msi_page_list;

        /* Domain for flush queue callback; NULL if flush queue not in use */
        struct iommu_domain *fq_domain;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
        if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
                return cookie->iovad.granule;
        return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
        struct iommu_dma_cookie *cookie;

        cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
        if (cookie) {
                INIT_LIST_HEAD(&cookie->msi_page_list);
                cookie->type = type;
        }
        return cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
        if (domain->iova_cookie)
                return -EEXIST;

        domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
        if (!domain->iova_cookie)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
        struct iommu_dma_cookie *cookie;

        if (domain->type != IOMMU_DOMAIN_UNMANAGED)
                return -EINVAL;

        if (domain->iova_cookie)
                return -EEXIST;

        cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
        if (!cookie)
                return -ENOMEM;

        cookie->msi_iova = base;
        domain->iova_cookie = cookie;
        return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iommu_dma_msi_page *msi, *tmp;

        if (!cookie)
                return;

        if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
                put_iova_domain(&cookie->iovad);

        list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
                list_del(&msi->list);
                kfree(msi);
        }
        kfree(cookie);
        domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
        if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
                iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
                phys_addr_t start, phys_addr_t end)
{
        struct iova_domain *iovad = &cookie->iovad;
        struct iommu_dma_msi_page *msi_page;
        int i, num_pages;

        start -= iova_offset(iovad, start);
        num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

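        /*
         * Pre-populate a 1:1 (iova == phys) msi_page entry for each granule
         * of the hardware-reserved MSI region, so that later MSI mappings
         * can simply find and reuse these entries rather than allocating
         * fresh IOVA space for the doorbells.
         */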
        for (i = 0; i < num_pages; i++) {
                msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
                if (!msi_page)
                        return -ENOMEM;

                msi_page->phys = start;
                msi_page->iova = start;
                INIT_LIST_HEAD(&msi_page->list);
                list_add(&msi_page->list, &cookie->msi_page_list);
                start += iovad->granule;
        }

        return 0;
}

static int iova_reserve_pci_windows(struct pci_dev *dev,
                struct iova_domain *iovad)
{
        struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
        struct resource_entry *window;
        unsigned long lo, hi;
        phys_addr_t start = 0, end;

        resource_list_for_each_entry(window, &bridge->windows) {
                if (resource_type(window->res) != IORESOURCE_MEM)
                        continue;

                lo = iova_pfn(iovad, window->res->start - window->offset);
                hi = iova_pfn(iovad, window->res->end - window->offset);
                reserve_iova(iovad, lo, hi);
        }

        /* Get reserved DMA windows from host bridge */
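        /*
         * Only the bridge's dma_ranges windows are usable for DMA, so walk
         * the (sorted) list and reserve the IOVA gaps between consecutive
         * windows - including the space below the first window and, via the
         * final goto, everything above the last one.
         */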
        resource_list_for_each_entry(window, &bridge->dma_ranges) {
                end = window->res->start - window->offset;
resv_iova:
                if (end > start) {
                        lo = iova_pfn(iovad, start);
                        hi = iova_pfn(iovad, end);
                        reserve_iova(iovad, lo, hi);
                } else if (end < start) {
                        /* dma_ranges list should be sorted */
                        dev_err(&dev->dev,
                                "Failed to reserve IOVA [%pa-%pa]\n",
                                &start, &end);
                        return -EINVAL;
                }

                start = window->res->end - window->offset + 1;
                /* If window is last entry */
                if (window->node.next == &bridge->dma_ranges &&
                    end != ~(phys_addr_t)0) {
                        end = ~(phys_addr_t)0;
                        goto resv_iova;
                }
        }

        return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
                struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        struct iommu_resv_region *region;
        LIST_HEAD(resv_regions);
        int ret = 0;

        if (dev_is_pci(dev)) {
                ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
                if (ret)
                        return ret;
        }

        iommu_get_resv_regions(dev, &resv_regions);
        list_for_each_entry(region, &resv_regions, list) {
                unsigned long lo, hi;

                /* We ARE the software that manages these! */
                if (region->type == IOMMU_RESV_SW_MSI)
                        continue;

                lo = iova_pfn(iovad, region->start);
                hi = iova_pfn(iovad, region->start + region->length - 1);
                reserve_iova(iovad, lo, hi);

                if (region->type == IOMMU_RESV_MSI)
                        ret = cookie_init_hw_msi_region(cookie, region->start,
                                        region->start + region->length);
                if (ret)
                        break;
        }
        iommu_put_resv_regions(dev, &resv_regions);

        return ret;
}

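/*
 * Callback invoked by the IOVA flush queue (when the queue fills or its
 * timer fires): perform a full IOTLB invalidation for the domain that
 * opted in to lazy unmapping.
 */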
static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
        struct iommu_dma_cookie *cookie;
        struct iommu_domain *domain;

        cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
        domain = cookie->fq_domain;
        /*
         * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
         * implies that ops->flush_iotlb_all must be non-NULL.
         */
        domain->ops->flush_iotlb_all(domain);
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
                u64 size, struct device *dev)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        unsigned long order, base_pfn;
        struct iova_domain *iovad;
        int attr;

        if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
                return -EINVAL;

        iovad = &cookie->iovad;

        /* Use the smallest supported page size for IOVA granularity */
        order = __ffs(domain->pgsize_bitmap);
        base_pfn = max_t(unsigned long, 1, base >> order);

        /* Check the domain allows at least some access to the device... */
        if (domain->geometry.force_aperture) {
                if (base > domain->geometry.aperture_end ||
                    base + size <= domain->geometry.aperture_start) {
                        pr_warn("specified DMA range outside IOMMU capability\n");
                        return -EFAULT;
                }
                /* ...then finally give it a kicking to make sure it fits */
                base_pfn = max_t(unsigned long, base_pfn,
                                domain->geometry.aperture_start >> order);
        }

        /* start_pfn is always nonzero for an already-initialised domain */
        if (iovad->start_pfn) {
                if (1UL << order != iovad->granule ||
                    base_pfn != iovad->start_pfn) {
                        pr_warn("Incompatible range for DMA domain\n");
                        return -EFAULT;
                }

                return 0;
        }

        init_iova_domain(iovad, 1UL << order, base_pfn);

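        /*
         * If the driver asked for DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE (lazy,
         * "non-strict" invalidation), set up the IOVA flush queue so that
         * freed IOVAs are batched and the IOTLB is flushed from the queue
         * rather than synchronously on every unmap.
         */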
        if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
                        DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
                if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
                                          NULL))
                        pr_warn("iova flush queue initialization failed\n");
                else
                        cookie->fq_domain = domain;
        }

        if (!dev)
                return 0;

        return iova_reserve_iommu_regions(dev, domain);
}

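/*
 * In a kdump (crash capture) kernel, drivers whose IOMMU supports deferred
 * attach keep the device on the old kernel's mappings until the first DMA
 * API call; this helper performs that deferred attach when needed.
 */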
static int iommu_dma_deferred_attach(struct device *dev,
                struct iommu_domain *domain)
{
        const struct iommu_ops *ops = domain->ops;

        if (!is_kdump_kernel())
                return 0;

        if (unlikely(ops->is_attach_deferred &&
                        ops->is_attach_deferred(domain, dev)))
                return iommu_attach_device(domain, dev);

        return 0;
}

/*
 * Reserve an IOVA range so the DMA API will not allocate from it for this
 * device. Should be called prior to using the DMA APIs.
 */
int iommu_dma_reserve_iova(struct device *dev, dma_addr_t base,
                u64 size)
{
        struct iommu_domain *domain;
        struct iommu_dma_cookie *cookie;
        struct iova_domain *iovad;
        unsigned long pfn_lo, pfn_hi;

        domain = iommu_get_domain_for_dev(dev);
        if (!domain || !domain->iova_cookie)
                return -EINVAL;

        cookie = domain->iova_cookie;
        iovad = &cookie->iovad;

        /* iova will be freed automatically by put_iova_domain() */
        pfn_lo = iova_pfn(iovad, base);
        pfn_hi = iova_pfn(iovad, base + size - 1);
        if (!reserve_iova(iovad, pfn_lo, pfn_hi))
                return -EINVAL;

        return 0;
}
EXPORT_SYMBOL(iommu_dma_reserve_iova);

/*
 * Switch the device's DMA domain to best-fit IOVA allocation. Should be
 * called prior to using the DMA APIs.
 */
int iommu_dma_enable_best_fit_algo(struct device *dev)
{
        struct iommu_domain *domain;
        struct iova_domain *iovad;

        domain = iommu_get_domain_for_dev(dev);
        if (!domain || !domain->iova_cookie)
                return -EINVAL;

        iovad = &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
        iovad->best_fit = true;
        return 0;
}
EXPORT_SYMBOL(iommu_dma_enable_best_fit_algo);

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
                unsigned long attrs)
{
        int prot = coherent ? IOMMU_CACHE : 0;

        if (attrs & DMA_ATTR_PRIVILEGED)
                prot |= IOMMU_PRIV;
        if (attrs & DMA_ATTR_SYS_CACHE_ONLY)
                prot |= IOMMU_SYS_CACHE;
        if (attrs & DMA_ATTR_SYS_CACHE_ONLY_NWA)
                prot |= IOMMU_SYS_CACHE_NWA;

        switch (dir) {
        case DMA_BIDIRECTIONAL:
                return prot | IOMMU_READ | IOMMU_WRITE;
        case DMA_TO_DEVICE:
                return prot | IOMMU_READ;
        case DMA_FROM_DEVICE:
                return prot | IOMMU_WRITE;
        default:
                return 0;
        }
}

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
                size_t size, u64 dma_limit, struct device *dev)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        unsigned long shift, iova_len, iova = 0;

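        /*
         * An MSI cookie has no iova_domain: it is just a trivial linear
         * allocator that bumps msi_iova within the caller-reserved region,
         * so hand out the next chunk and return early.
         */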
        if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
                cookie->msi_iova += size;
                return cookie->msi_iova - size;
        }

        shift = iova_shift(iovad);
        iova_len = size >> shift;
        /*
         * Freeing non-power-of-two-sized allocations back into the IOVA caches
         * will come back to bite us badly, so we have to waste a bit of space
         * rounding up anything cacheable to make sure that can't happen. The
         * order of the unadjusted size will still match upon freeing.
         */
        if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
                iova_len = roundup_pow_of_two(iova_len);

        dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);

        if (domain->geometry.force_aperture)
                dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);

        /* Try to get PCI devices a SAC address */
        if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
                iova = alloc_iova_fast(iovad, iova_len,
                                       DMA_BIT_MASK(32) >> shift, false);

        if (!iova)
                iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
                                       true);

        trace_android_vh_iommu_alloc_iova(dev, (dma_addr_t)iova << shift, size);
        trace_android_vh_iommu_iovad_alloc_iova(dev, iovad, (dma_addr_t)iova << shift, size);

        return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
                dma_addr_t iova, size_t size)
{
        struct iova_domain *iovad = &cookie->iovad;

        /* The MSI case is only ever cleaning up its most recent allocation */
        if (cookie->type == IOMMU_DMA_MSI_COOKIE)
                cookie->msi_iova -= size;
        else if (cookie->fq_domain)     /* non-strict mode */
                queue_iova(iovad, iova_pfn(iovad, iova),
                                size >> iova_shift(iovad), 0);
        else
                free_iova_fast(iovad, iova_pfn(iovad, iova),
                                size >> iova_shift(iovad));

        trace_android_vh_iommu_free_iova(iova, size);
        trace_android_vh_iommu_iovad_free_iova(iovad, iova, size);
}

static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
                size_t size)
{
        struct iommu_domain *domain = iommu_get_dma_domain(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        size_t iova_off = iova_offset(iovad, dma_addr);
        struct iommu_iotlb_gather iotlb_gather;
        size_t unmapped;

        dma_addr -= iova_off;
        size = iova_align(iovad, size + iova_off);
        iommu_iotlb_gather_init(&iotlb_gather);

        unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
        WARN_ON(unmapped != size);

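        /*
         * With a flush queue the IOTLB invalidation is deferred (the IOVA is
         * only recycled once the queue is flushed), so the gathered sync is
         * only needed in strict mode.
         */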
        if (!cookie->fq_domain)
                iommu_iotlb_sync(domain, &iotlb_gather);
        iommu_dma_free_iova(cookie, dma_addr, size);
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
                size_t size, int prot, u64 dma_mask)
{
        struct iommu_domain *domain = iommu_get_dma_domain(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        size_t iova_off = iova_offset(iovad, phys);
        dma_addr_t iova;

        if (unlikely(iommu_dma_deferred_attach(dev, domain)))
                return DMA_MAPPING_ERROR;

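        /*
         * Mappings must cover whole IOVA granules: grow the size by the
         * sub-granule offset of @phys, map the granule-aligned region, and
         * add the offset back into the returned handle.
         */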
        size = iova_align(iovad, size + iova_off);

        iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
        if (!iova)
                return DMA_MAPPING_ERROR;

        if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
                iommu_dma_free_iova(cookie, iova, size);
                return DMA_MAPPING_ERROR;
        }
        return iova + iova_off;
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
        while (count--)
                __free_page(pages[count]);
        kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(struct device *dev,
                unsigned int count, unsigned long order_mask, gfp_t gfp)
{
        struct page **pages;
        unsigned int i = 0, nid = dev_to_node(dev);

        order_mask &= (2U << MAX_ORDER) - 1;
        if (!order_mask)
                return NULL;

        pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return NULL;

        /* IOMMU can map any pages, so highmem can also be used here */
        gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

        /* It makes no sense to muck about with huge pages */
        gfp &= ~__GFP_COMP;

        while (count) {
                struct page *page = NULL;
                unsigned int order_size;

                /*
                 * Higher-order allocations are a convenience rather
                 * than a necessity, hence using __GFP_NORETRY until
                 * falling back to minimum-order allocations.
                 */
                for (order_mask &= (2U << __fls(count)) - 1;
                     order_mask; order_mask &= ~order_size) {
                        unsigned int order = __fls(order_mask);
                        gfp_t alloc_flags = gfp;

                        order_size = 1U << order;
                        if (order_mask > order_size)
                                alloc_flags |= __GFP_NORETRY;
                        page = alloc_pages_node(nid, alloc_flags, order);
                        if (!page)
                                continue;
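                        /*
                         * Split higher-order allocations into independent
                         * order-0 pages so the buffer can be tracked (and
                         * later freed) as a simple page array.
                         */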
                        if (order)
                                split_page(page, order);
                        break;
                }
                if (!page) {
                        __iommu_dma_free_pages(pages, i);
                        return NULL;
                }
                count -= order_size;
                while (order_size--)
                        pages[i++] = page++;
        }
        return pages;
}

/**
 * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *       attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @prot: pgprot_t to use for the remapped mapping
 * @attrs: DMA attributes for this allocation
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Mapped virtual address, or NULL on failure.
 */
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
                unsigned long attrs)
{
        struct iommu_domain *domain = iommu_get_dma_domain(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        bool coherent = dev_is_dma_coherent(dev);
        int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
        unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
        struct page **pages;
        struct sg_table sgt;
        dma_addr_t iova;
        void *vaddr;

        *dma_handle = DMA_MAPPING_ERROR;

        if (unlikely(iommu_dma_deferred_attach(dev, domain)))
                return NULL;

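        /*
         * alloc_sizes & -alloc_sizes isolates the lowest set bit, i.e. the
         * smallest IOMMU page size; the buffer size and the set of page
         * orders we may allocate in are derived from that.
         */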
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) min_size = alloc_sizes & -alloc_sizes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) if (min_size < PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) min_size = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) alloc_sizes |= PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) size = ALIGN(size, min_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) alloc_sizes = min_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) count = PAGE_ALIGN(size) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) if (!pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) size = iova_align(iovad, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) if (!iova)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) goto out_free_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) goto out_free_iova;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) if (!(ioprot & IOMMU_CACHE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) arch_dma_prep_coherent(sg_page(sg), sg->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) < size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) goto out_free_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) vaddr = dma_common_pages_remap(pages, size, prot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) __builtin_return_address(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) if (!vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) goto out_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) *dma_handle = iova;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) sg_free_table(&sgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) return vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) out_unmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) __iommu_dma_unmap(dev, iova, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) out_free_sg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) sg_free_table(&sgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) out_free_iova:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) iommu_dma_free_iova(cookie, iova, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) out_free_pages:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) __iommu_dma_free_pages(pages, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) * __iommu_dma_mmap - Map a buffer into provided user VMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) * @pages: Array representing buffer from __iommu_dma_alloc()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) * @size: Size of buffer in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) * @vma: VMA describing requested userspace mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) * Maps the pages of the buffer in @pages into @vma. The caller is responsible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) * for verifying the correct size and protection of @vma beforehand.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) static int __iommu_dma_mmap(struct page **pages, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) static void iommu_dma_sync_single_for_cpu(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) phys_addr_t phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) if (dev_is_dma_coherent(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) arch_sync_dma_for_cpu(phys, size, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) static void iommu_dma_sync_single_for_device(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) phys_addr_t phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) if (dev_is_dma_coherent(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) arch_sync_dma_for_device(phys, size, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) static void iommu_dma_sync_sg_for_cpu(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) struct scatterlist *sgl, int nelems,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) enum dma_data_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) if (dev_is_dma_coherent(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) for_each_sg(sgl, sg, nelems, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) static void iommu_dma_sync_sg_for_device(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) struct scatterlist *sgl, int nelems,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) enum dma_data_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) if (dev_is_dma_coherent(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) for_each_sg(sgl, sg, nelems, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) unsigned long offset, size_t size, enum dma_data_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) phys_addr_t phys = page_to_phys(page) + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) bool coherent = dev_is_dma_coherent(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) int prot = dma_info_to_prot(dir, coherent, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) dma_addr_t dma_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) dma_handle != DMA_MAPPING_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) arch_sync_dma_for_device(phys, size, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) return dma_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) size_t size, enum dma_data_direction dir, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) __iommu_dma_unmap(dev, dma_handle, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) }
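
/*
 * Illustrative sketch, not used by this file: the DMA_ATTR_SKIP_CPU_SYNC
 * handling in the map/unmap paths above lets callers that recycle buffers
 * skip cache maintenance at map time and perform it themselves for just the
 * bytes the device will touch. Hypothetical helper on a non-coherent device:
 */
static inline dma_addr_t example_map_partial(struct device *dev, void *buf,
					     size_t buf_size, size_t used)
{
	dma_addr_t handle;

	handle = dma_map_single_attrs(dev, buf, buf_size, DMA_TO_DEVICE,
				      DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, handle))
		return DMA_MAPPING_ERROR;

	/* The caller now owns cache maintenance for the used portion */
	dma_sync_single_for_device(dev, handle, used, DMA_TO_DEVICE);
	return handle;
}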
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) * Prepare a successfully-mapped scatterlist to give back to the caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) * At this point the segments are already laid out by iommu_dma_map_sg() to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) * avoid individually crossing any boundaries, so we merely need to check a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * segment's start address to avoid concatenating across one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) dma_addr_t dma_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) struct scatterlist *s, *cur = sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) unsigned long seg_mask = dma_get_seg_boundary(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) int i, count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) for_each_sg(sg, s, nents, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) /* Restore this segment's original unaligned fields first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) unsigned int s_iova_off = sg_dma_address(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) unsigned int s_length = sg_dma_len(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) unsigned int s_iova_len = s->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) s->offset += s_iova_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) s->length = s_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) sg_dma_address(s) = DMA_MAPPING_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) sg_dma_len(s) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) * Now fill in the real DMA data. If...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) * - there is a valid output segment to append to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * - and this segment starts on an IOVA page boundary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * - but doesn't fall at a segment boundary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) * - and wouldn't make the resulting output segment too long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) (max_len - cur_len >= s_length)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) /* ...then concatenate it with the previous one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) cur_len += s_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) /* Otherwise start the next output segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) if (i > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) cur = sg_next(cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) cur_len = s_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) sg_dma_address(cur) = dma_addr + s_iova_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) sg_dma_len(cur) = cur_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) dma_addr += s_iova_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (s_length + s_iova_off < s_iova_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) cur_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
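
/*
 * Worked example for __finalise_sg() (illustration only, assuming a 4KB
 * IOVA granule, dma_addr = 0x10000000 and a large segment boundary mask):
 *
 *   s[0]: stashed iova_off 0x800, stashed len 0x800, padded length 0x1000
 *         -> starts output segment 0: dma_address 0x10000800, dma_len 0x800
 *   s[1]: stashed iova_off 0x0,   stashed len 0x1000, padded length 0x1000
 *         -> starts on a granule boundary and fits within max_len, so it is
 *            concatenated: output segment 0 grows to dma_len 0x1800
 *
 * Two granule-level mappings therefore come back to the caller as a single
 * DMA segment covering IOVA 0x10000800-0x10001fff.
 */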
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) * If mapping failed, then just restore the original list while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * making sure the DMA fields are invalidated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) static void __invalidate_sg(struct scatterlist *sg, int nents)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) struct scatterlist *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) for_each_sg(sg, s, nents, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (sg_dma_address(s) != DMA_MAPPING_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) s->offset += sg_dma_address(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) if (sg_dma_len(s))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) s->length = sg_dma_len(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) sg_dma_address(s) = DMA_MAPPING_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) sg_dma_len(s) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * The DMA API client is passing in a scatterlist which could describe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) * any old buffer layout, but the IOMMU API requires everything to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) * aligned to IOMMU pages. Hence the need for this complicated bit of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * impedance-matching, to be able to hand off a suitably-aligned list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) * but still preserve the original offsets and sizes for the caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) int nents, enum dma_data_direction dir, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) struct iommu_domain *domain = iommu_get_dma_domain(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) struct iommu_dma_cookie *cookie = domain->iova_cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) struct iova_domain *iovad = &cookie->iovad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) struct scatterlist *s, *prev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) dma_addr_t iova;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) size_t iova_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) unsigned long mask = dma_get_seg_boundary(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) if (unlikely(iommu_dma_deferred_attach(dev, domain)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) * Work out how much IOVA space we need, and align the segments to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) * IOVA granules for the IOMMU driver to handle. With some clever
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * trickery we can modify the list in-place, but reversibly, by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * stashing the unaligned parts in the as-yet-unused DMA fields.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) for_each_sg(sg, s, nents, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) size_t s_iova_off = iova_offset(iovad, s->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) size_t s_length = s->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) size_t pad_len = (mask - iova_len + 1) & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) sg_dma_address(s) = s_iova_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) sg_dma_len(s) = s_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) s->offset -= s_iova_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) s_length = iova_align(iovad, s_length + s_iova_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) s->length = s_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) * Due to the alignment of our single IOVA allocation, we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) * depend on these assumptions about the segment boundary mask:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) * - If mask size >= IOVA size, then the IOVA range cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) * possibly fall across a boundary, so we don't care.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) * - If mask size < IOVA size, then the IOVA range must start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * exactly on a boundary, therefore we can lay things out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) * based purely on segment lengths without needing to know
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) * the actual addresses beforehand.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * - The mask must be a power of 2, so pad_len == 0 if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * iova_len == 0, thus we cannot dereference prev the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * time through here (i.e. before it has a meaningful value).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) if (pad_len && pad_len < s_length - 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) prev->length += pad_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) iova_len += pad_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) iova_len += s_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) prev = s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) if (!iova)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) goto out_restore_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) * We'll leave any physical concatenation to the IOMMU driver's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) * implementation - it knows better than we do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) goto out_free_iova;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) return __finalise_sg(dev, sg, nents, iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) out_free_iova:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) iommu_dma_free_iova(cookie, iova, iova_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) out_restore_sg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) __invalidate_sg(sg, nents);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) }
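
/*
 * Worked example for the stash-and-align step above (illustration only,
 * 4KB IOVA granule):
 *
 *   s[0]: offset 0x800 into its page, length 0x400
 *         -> sg_dma_address stashes 0x800, sg_dma_len stashes 0x400,
 *            s->offset is rounded down to the granule and s->length is
 *            padded to 0x1000; iova_len = 0x1000
 *   s[1]: offset 0x0, length 0x2000
 *         -> already granule-aligned; iova_len = 0x3000
 *
 * A single 0x3000-byte IOVA range is then allocated and mapped, and
 * __finalise_sg() re-applies the stashed 0x800/0x400 on top of it. If the
 * device had a small segment boundary mask, pad_len would additionally grow
 * the previous segment so that the next one starts exactly on a boundary.
 */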
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) int nents, enum dma_data_direction dir, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) dma_addr_t start, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) struct scatterlist *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) * The scatterlist segments are mapped into a single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * contiguous IOVA allocation, so this is incredibly easy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) start = sg_dma_address(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) for_each_sg(sg_next(sg), tmp, nents - 1, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (sg_dma_len(tmp) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) sg = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) end = sg_dma_address(sg) + sg_dma_len(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) __iommu_dma_unmap(dev, start, end - start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) size_t size, enum dma_data_direction dir, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) return __iommu_dma_map(dev, phys, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) dma_get_mask(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) size_t size, enum dma_data_direction dir, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) __iommu_dma_unmap(dev, handle, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
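
/*
 * Illustrative sketch, not used by this file: the resource mapping above
 * backs dma_map_resource(), which makes MMIO (e.g. a peripheral FIFO
 * register) addressable by another device's DMA engine through the IOMMU.
 * The mapping is created with IOMMU_MMIO and involves no CPU cache
 * maintenance. Helper name and usage are hypothetical.
 */
static inline void example_map_fifo(struct device *dma_dev,
				    phys_addr_t fifo_phys, size_t len)
{
	dma_addr_t dma;

	dma = dma_map_resource(dma_dev, fifo_phys, len, DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dma_dev, dma))
		return;

	/* ... program the DMA engine with "dma" as the FIFO address ... */

	dma_unmap_resource(dma_dev, dma, len, DMA_BIDIRECTIONAL, 0);
}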
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) size_t alloc_size = PAGE_ALIGN(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) int count = alloc_size >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) struct page *page = NULL, **pages = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) /* Non-coherent atomic allocation? Easy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) dma_free_from_pool(dev, cpu_addr, alloc_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) * If the address is remapped, then it's either non-coherent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) * or highmem CMA, or an iommu_dma_alloc_remap() construction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) pages = dma_common_find_pages(cpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) if (!pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) page = vmalloc_to_page(cpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) dma_common_free_remap(cpu_addr, alloc_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) /* Lowmem means a coherent atomic or CMA allocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) page = virt_to_page(cpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) if (pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) __iommu_dma_free_pages(pages, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) if (page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) dma_free_contiguous(dev, page, alloc_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) dma_addr_t handle, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) __iommu_dma_unmap(dev, handle, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) __iommu_dma_free(dev, size, cpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) struct page **pagep, gfp_t gfp, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) bool coherent = dev_is_dma_coherent(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) size_t alloc_size = PAGE_ALIGN(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) int node = dev_to_node(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) struct page *page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) void *cpu_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) page = dma_alloc_contiguous(dev, alloc_size, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) page = alloc_pages_node(node, gfp, get_order(alloc_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) cpu_addr = dma_common_contiguous_remap(page, alloc_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) prot, __builtin_return_address(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) if (!cpu_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) goto out_free_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (!coherent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) arch_dma_prep_coherent(page, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) cpu_addr = page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) *pagep = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) memset(cpu_addr, 0, alloc_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) return cpu_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) out_free_pages:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) dma_free_contiguous(dev, page, alloc_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) static void *iommu_dma_alloc(struct device *dev, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) bool coherent = dev_is_dma_coherent(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) struct page *page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) void *cpu_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) gfp |= __GFP_ZERO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) return iommu_dma_alloc_remap(dev, size, handle, gfp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) !gfpflags_allow_blocking(gfp) && !coherent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) gfp, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) if (!cpu_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) dev->coherent_dma_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) if (*handle == DMA_MAPPING_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) __iommu_dma_free(dev, size, cpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) return cpu_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
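
/*
 * Illustrative sketch, not used by this file: iommu_dma_alloc() backs
 * dma_alloc_coherent()/dma_alloc_attrs(). With CONFIG_DMA_REMAP, a
 * blockable request without DMA_ATTR_FORCE_CONTIGUOUS is satisfied by
 * iommu_dma_alloc_remap(); DMA_ATTR_FORCE_CONTIGUOUS demands one physically
 * contiguous allocation instead. Hypothetical helper:
 */
static inline void *example_alloc_contiguous(struct device *dev, size_t size,
					     dma_addr_t *dma_handle)
{
	/* Physically contiguous, IOMMU-mapped, zeroed coherent buffer */
	return dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL,
			       DMA_ATTR_FORCE_CONTIGUOUS);
}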
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) #ifdef CONFIG_DMA_REMAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) static void *iommu_dma_alloc_noncoherent(struct device *dev, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) dma_addr_t *handle, enum dma_data_direction dir, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) if (!gfpflags_allow_blocking(gfp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) page = dma_common_alloc_pages(dev, size, handle, dir, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) return page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) return iommu_dma_alloc_remap(dev, size, handle, gfp | __GFP_ZERO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) PAGE_KERNEL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) static void iommu_dma_free_noncoherent(struct device *dev, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) void *cpu_addr, dma_addr_t handle, enum dma_data_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) __iommu_dma_unmap(dev, handle, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) __iommu_dma_free(dev, size, cpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) #define iommu_dma_alloc_noncoherent NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) #define iommu_dma_free_noncoherent NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) #endif /* CONFIG_DMA_REMAP */
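
/*
 * Illustrative sketch, not used by this file: the two helpers above back
 * dma_alloc_noncoherent(), which hands the caller cacheable memory plus a
 * DMA handle, leaves all cache maintenance to explicit dma_sync_*() calls,
 * and is released later with dma_free_noncoherent(). Hypothetical helper:
 */
static inline void *example_alloc_noncoherent(struct device *dev, size_t size,
					      dma_addr_t *dma_handle)
{
	void *vaddr = dma_alloc_noncoherent(dev, size, dma_handle,
					    DMA_BIDIRECTIONAL, GFP_KERNEL);

	if (!vaddr)
		return NULL;

	/* CPU fills the buffer, then pushes it out to the device */
	dma_sync_single_for_device(dev, *dma_handle, size, DMA_BIDIRECTIONAL);
	return vaddr;
}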
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) void *cpu_addr, dma_addr_t dma_addr, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) unsigned long pfn, off = vma->vm_pgoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) struct page **pages = dma_common_find_pages(cpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) if (pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) return __iommu_dma_mmap(pages, size, vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) pfn = vmalloc_to_pfn(cpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) pfn = page_to_pfn(virt_to_page(cpu_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) return remap_pfn_range(vma, vma->vm_start, pfn + off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) vma->vm_end - vma->vm_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) vma->vm_page_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
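
/*
 * Illustrative sketch, not used by this file: a driver exporting one of its
 * coherent buffers to userspace reaches iommu_dma_mmap() via
 * dma_mmap_coherent() from its own mmap file operation. Helper name and
 * parameters are hypothetical.
 */
static inline int example_mmap_buffer(struct device *dev,
				      struct vm_area_struct *vma,
				      void *cpu_addr, dma_addr_t dma_addr,
				      size_t size)
{
	/* vma->vm_pgoff is interpreted as an offset into the buffer */
	return dma_mmap_coherent(dev, vma, cpu_addr, dma_addr, size);
}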
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) void *cpu_addr, dma_addr_t dma_addr, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) struct page **pages = dma_common_find_pages(cpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) if (pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) return sg_alloc_table_from_pages(sgt, pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) PAGE_ALIGN(size) >> PAGE_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 0, size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) page = vmalloc_to_page(cpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) page = virt_to_page(cpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
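
/*
 * Illustrative sketch, not used by this file: dma-buf exporters reach
 * iommu_dma_get_sgtable() via dma_get_sgtable() to describe a coherent
 * allocation as a scatterlist for an importing device. Hypothetical helper:
 */
static inline int example_export_sgtable(struct device *dev, void *cpu_addr,
					 dma_addr_t dma_addr, size_t size,
					 struct sg_table *sgt)
{
	int ret = dma_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);

	if (ret)
		return ret;

	/* ... hand sgt to the importer, then release it ... */
	sg_free_table(sgt);
	return 0;
}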
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) struct iommu_domain *domain = iommu_get_dma_domain(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
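
/*
 * Worked example (illustration only): with a typical pgsize_bitmap of
 * SZ_4K | SZ_2M | SZ_1G = 0x40201000, __ffs() is 12, so the merge boundary
 * reported to callers such as the block layer is (1UL << 12) - 1 = 0xfff,
 * i.e. requests may be merged as long as the resulting segments stay
 * aligned to the 4KB IOVA granule.
 */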
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) static const struct dma_map_ops iommu_dma_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) .alloc = iommu_dma_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) .free = iommu_dma_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) .alloc_pages = dma_common_alloc_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) .free_pages = dma_common_free_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) .alloc_noncoherent = iommu_dma_alloc_noncoherent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) .free_noncoherent = iommu_dma_free_noncoherent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) .mmap = iommu_dma_mmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) .get_sgtable = iommu_dma_get_sgtable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) .map_page = iommu_dma_map_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) .unmap_page = iommu_dma_unmap_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) .map_sg = iommu_dma_map_sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) .unmap_sg = iommu_dma_unmap_sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) .sync_single_for_cpu = iommu_dma_sync_single_for_cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) .sync_single_for_device = iommu_dma_sync_single_for_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) .sync_sg_for_cpu = iommu_dma_sync_sg_for_cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) .sync_sg_for_device = iommu_dma_sync_sg_for_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) .map_resource = iommu_dma_map_resource,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) .unmap_resource = iommu_dma_unmap_resource,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) .get_merge_boundary = iommu_dma_get_merge_boundary,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) * The IOMMU core code allocates the default DMA domain, which the underlying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) * IOMMU driver needs to support via the dma-iommu layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) if (!domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if (domain->type == IOMMU_DOMAIN_DMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) if (iommu_dma_init_domain(domain, dma_base, size, dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) dev->dma_ops = &iommu_dma_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) if (domain->type == IOMMU_DOMAIN_DMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) struct iommu_dma_cookie *cookie = domain->iova_cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) struct iova_domain *iovad = &cookie->iovad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) init_iova_domain_procfs(iovad, dev_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) dev_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) phys_addr_t msi_addr, struct iommu_domain *domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) struct iommu_dma_cookie *cookie = domain->iova_cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) struct iommu_dma_msi_page *msi_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) dma_addr_t iova;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) size_t size = cookie_msi_granule(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) msi_addr &= ~(phys_addr_t)(size - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) list_for_each_entry(msi_page, &cookie->msi_page_list, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (msi_page->phys == msi_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) return msi_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) if (!msi_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) if (!iova)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) goto out_free_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) if (iommu_map(domain, iova, msi_addr, size, prot))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) goto out_free_iova;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) INIT_LIST_HEAD(&msi_page->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) msi_page->phys = msi_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) msi_page->iova = iova;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) list_add(&msi_page->list, &cookie->msi_page_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) return msi_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) out_free_iova:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) iommu_dma_free_iova(cookie, iova, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) out_free_page:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) kfree(msi_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) struct device *dev = msi_desc_to_dev(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) struct iommu_dma_msi_page *msi_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) static DEFINE_MUTEX(msi_prepare_lock); /* see below */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) if (!domain || !domain->iova_cookie) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) desc->iommu_cookie = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) * In fact the whole prepare operation should already be serialised by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) * irq_domain_mutex further up the callchain, but that's pretty subtle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) * on its own, so consider this locking as failsafe documentation...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) mutex_lock(&msi_prepare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) mutex_unlock(&msi_prepare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) msi_desc_set_iommu_cookie(desc, msi_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) if (!msi_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) void iommu_dma_compose_msi_msg(struct msi_desc *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) struct msi_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) struct device *dev = msi_desc_to_dev(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) const struct iommu_dma_msi_page *msi_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) msi_page = msi_desc_get_iommu_cookie(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) msg->address_hi = upper_32_bits(msi_page->iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) msg->address_lo += lower_32_bits(msi_page->iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
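
/*
 * Illustrative sketch, not used by this file: an MSI controller driver is
 * expected to call iommu_dma_prepare_msi() from its prepare path and
 * iommu_dma_compose_msi_msg() when composing the message, so that a device
 * behind an IOMMU targets the mapped IOVA rather than the physical
 * doorbell. The helper and doorbell parameter are hypothetical.
 */
static inline void example_compose_msi(struct msi_desc *desc,
				       struct msi_msg *msg,
				       phys_addr_t doorbell)
{
	msg->address_hi = upper_32_bits(doorbell);
	msg->address_lo = lower_32_bits(doorbell);
	msg->data = 0;

	/* Rewrites the address to the IOVA set up by the prepare step */
	iommu_dma_compose_msi_msg(desc, msg);
}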
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) static int iommu_dma_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) return iova_cache_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) arch_initcall(iommu_dma_init);