// SPDX-License-Identifier: GPL-2.0-only
#include <linux/cpu.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

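/*
 * Allocate pages for the swiotlb bounce buffer.  If any RAM exists below
 * the 32-bit boundary, constrain the allocation with __GFP_DMA32 (or
 * __GFP_DMA when ZONE_DMA32 is not configured) so that 32-bit-capable
 * DMA masters can reach the buffer.
 */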
unsigned long xen_get_swiotlb_free_pages(unsigned int order)
{
	phys_addr_t base;
	gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM;
	u64 i;

	for_each_mem_range(i, &base, NULL) {
		if (base < (phys_addr_t)0xffffffff) {
			if (IS_ENABLED(CONFIG_ZONE_DMA32))
				flags |= __GFP_DMA32;
			else
				flags |= __GFP_DMA;
			break;
		}
	}
	return __get_free_pages(flags, order);
}

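/*
 * Set at init time if the hypervisor implements the GNTTABOP_cache_flush
 * hypercall (see xen_mm_init() below); checked in xen_arch_need_swiotlb().
 */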
static bool hypercall_cflush = false;

/* buffers in highmem or foreign pages cannot cross page boundaries */
static void dma_cache_maint(struct device *dev, dma_addr_t handle,
			    size_t size, u32 op)
{
	struct gnttab_cache_flush cflush;

	cflush.offset = xen_offset_in_page(handle);
	cflush.op = op;
	handle &= XEN_PAGE_MASK;

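	/*
	 * Walk the range one Xen page at a time: a single
	 * GNTTABOP_cache_flush operation covers at most one Xen page
	 * (offset + length within the page).
	 */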
	do {
		cflush.a.dev_bus_addr = dma_to_phys(dev, handle);

		if (size + cflush.offset > XEN_PAGE_SIZE)
			cflush.length = XEN_PAGE_SIZE - cflush.offset;
		else
			cflush.length = size;

		HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);

		cflush.offset = 0;
		handle += cflush.length;
		size -= cflush.length;
	} while (size);
}

/*
 * Dom0 is mapped 1:1, and while the Linux page can span across multiple
 * Xen pages, it is not possible for it to contain a mix of local and
 * foreign Xen pages.  Calling pfn_valid on a foreign mfn will always
 * return false, so if pfn_valid returns true the page is local and we
 * can use the native dma-direct functions, otherwise we call the Xen
 * specific version.
 */
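/*
 * Before the CPU reads a buffer the device has written (any direction
 * other than DMA_TO_DEVICE), stale cache lines covering the buffer must
 * be invalidated.
 */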
void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
			  size_t size, enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE)
		dma_cache_maint(dev, handle, size, GNTTAB_CACHE_INVAL);
}

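/*
 * Before the device reads a buffer the CPU has written, dirty cache
 * lines must be cleaned (written back); a buffer the device will only
 * write (DMA_FROM_DEVICE) just needs to be invalidated.
 */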
void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
			     size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_FROM_DEVICE)
		dma_cache_maint(dev, handle, size, GNTTAB_CACHE_INVAL);
	else
		dma_cache_maint(dev, handle, size, GNTTAB_CACHE_CLEAN);
}

bool xen_arch_need_swiotlb(struct device *dev,
			   phys_addr_t phys,
			   dma_addr_t dev_addr)
{
	unsigned int xen_pfn = XEN_PFN_DOWN(phys);
	unsigned int bfn = XEN_PFN_DOWN(dma_to_phys(dev, dev_addr));

	/*
	 * The swiotlb buffer should be used if
	 *	- Xen doesn't have the cache flush hypercall
	 *	- The Linux page refers to foreign memory
	 *	- The device doesn't support coherent DMA requests
	 *
	 * The Linux page may span multiple Xen pages, although it's not
	 * possible for it to contain a mix of local and foreign Xen
	 * pages.  Furthermore, range_straddles_page_boundary already
	 * checks whether the buffer is physically contiguous in host
	 * RAM.
	 *
	 * Therefore we only need to check the first Xen page to know if
	 * we require a bounce buffer because the device doesn't support
	 * coherent memory and we are not able to flush the cache.
	 */
	return (!hypercall_cflush && (xen_pfn != bfn) &&
		!dev_is_dma_coherent(dev));
}

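/*
 * With dom0 mapped 1:1, guest physical addresses are already bus
 * addresses, so a physically contiguous buffer is contiguous for the
 * device as well; no memory needs to be exchanged with the hypervisor.
 */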
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
				 unsigned int address_bits,
				 dma_addr_t *dma_handle)
{
	if (!xen_initial_domain())
		return -EINVAL;

	/* we assume that dom0 is mapped 1:1 for now */
	*dma_handle = pstart;
	return 0;
}

void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
}

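/*
 * Probe for the cache flush hypercall with an all-zero (no-op) request:
 * a hypervisor that implements GNTTABOP_cache_flush returns something
 * other than -ENOSYS, in which case dma_cache_maint() can be used on
 * foreign pages.
 */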
static int __init xen_mm_init(void)
{
	struct gnttab_cache_flush cflush;

	if (!xen_initial_domain())
		return 0;
	xen_swiotlb_init(1, false);

	cflush.op = 0;
	cflush.a.dev_bus_addr = 0;
	cflush.offset = 0;
	cflush.length = 0;
	if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
		hypercall_cflush = true;
	return 0;
}
arch_initcall(xen_mm_init);