^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Dynamic DMA mapping support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * This implementation is a fallback for platforms that do not support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * I/O TLBs (aka DMA address translation hardware).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Copyright (C) 2000, 2003 Hewlett-Packard Co
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * David Mosberger-Tang <davidm@hpl.hp.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * 03/05/07 davidm Switch from PCI-DMA to generic device DMA API.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * 00/12/13 davidm Rename to swiotlb.c and add mark_clean() to avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * unnecessary i-cache flushing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * 04/07/.. ak Better overflow handling. Assorted fixes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * 05/09/10 linville Add support for syncing ranges, support syncing for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) * DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * 08/12/11 beckyb Add highmem support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #define pr_fmt(fmt) "software IO TLB: " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/cache.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/dma-direct.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/dma-map-ops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/swiotlb.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/pfn.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <linux/ctype.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <linux/highmem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <linux/gfp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <linux/scatterlist.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include <linux/mem_encrypt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include <linux/set_memory.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #ifdef CONFIG_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include <linux/debugfs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #include <asm/dma.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #include <linux/memblock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #include <linux/iommu-helper.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #define CREATE_TRACE_POINTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #include <trace/events/swiotlb.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
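/* Number of IO TLB slabs (each 1 << IO_TLB_SHIFT bytes) that fit in one page. */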
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) * Minimum IO TLB size to bother booting with. Systems with mainly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) * 64-bit capable cards will only lightly use the swiotlb. If we can't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) * allocate a contiguous 1MB, we're probably in trouble anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) enum swiotlb_force swiotlb_force;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) * Used to do a quick range check in swiotlb_tbl_unmap_single and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) * swiotlb_tbl_sync_single(), to see if the memory was in fact allocated by this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) * API.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) phys_addr_t io_tlb_start, io_tlb_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) * The number of IO TLB slabs (allocated in multiples of IO_TLB_SEGSIZE) between
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) * io_tlb_start and io_tlb_end. This is command line adjustable via setup_io_tlb_npages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) static unsigned long io_tlb_nslabs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) * The number of IO TLB slots currently in use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) static unsigned long io_tlb_used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) * This is a free list describing the number of free entries available from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) * each index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) static unsigned int *io_tlb_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) static unsigned int io_tlb_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) * The maximum segment size that we can provide which (if pages are contiguous)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) * will not be bounced (unless SWIOTLB_FORCE is set).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) static unsigned int max_segment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) * We need to save away the original address corresponding to a mapped entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) * for the sync operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) #define INVALID_PHYS_ADDR (~(phys_addr_t)0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) static phys_addr_t *io_tlb_orig_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) * Protect the above data structures in the map and unmap calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) static DEFINE_SPINLOCK(io_tlb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) static int late_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)
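/*
 * Parse the "swiotlb=" kernel command line parameter.  Based on the parsing
 * below, the accepted format is:
 *
 *	swiotlb=<slab count>[,force|noforce]
 *
 * e.g. "swiotlb=65536,force" (an illustrative value) reserves 65536 slabs and
 * forces bouncing of all DMA.  The slab count is rounded up to a multiple of
 * IO_TLB_SEGSIZE so that there is no short tail segment.
 */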
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) static int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) setup_io_tlb_npages(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) if (isdigit(*str)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) io_tlb_nslabs = simple_strtoul(str, &str, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) /* avoid tail segment of size < IO_TLB_SEGSIZE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) if (*str == ',')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) ++str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) if (!strcmp(str, "force")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) swiotlb_force = SWIOTLB_FORCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) } else if (!strcmp(str, "noforce")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) swiotlb_force = SWIOTLB_NO_FORCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) io_tlb_nslabs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) early_param("swiotlb", setup_io_tlb_npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) static bool no_iotlb_memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) unsigned long swiotlb_nr_tbl(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) return unlikely(no_iotlb_memory) ? 0 : io_tlb_nslabs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) unsigned int swiotlb_max_segment(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) return unlikely(no_iotlb_memory) ? 0 : max_segment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) EXPORT_SYMBOL_GPL(swiotlb_max_segment);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) void swiotlb_set_max_segment(unsigned int val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) if (swiotlb_force == SWIOTLB_FORCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) max_segment = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) max_segment = rounddown(val, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) /* default to 64MB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) #define IO_TLB_DEFAULT_SIZE (64UL<<20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) unsigned long swiotlb_size_or_default(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) unsigned long size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) size = io_tlb_nslabs << IO_TLB_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) return size ? size : (IO_TLB_DEFAULT_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) void swiotlb_print_info(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) if (no_iotlb_memory) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) pr_warn("No low mem\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) pr_info("mapped [mem %pa-%pa] (%luMB)\n", &io_tlb_start, &io_tlb_end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) bytes >> 20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175)
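/* Offset of a slot within its IO_TLB_SEGSIZE-slot segment. */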
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) static inline unsigned long io_tlb_offset(unsigned long val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) return val & (IO_TLB_SEGSIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180)
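/* Number of IO_TLB_SIZE-byte slots needed to hold @val bytes, rounded up. */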
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) static inline unsigned long nr_slots(u64 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) return DIV_ROUND_UP(val, IO_TLB_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) * Early SWIOTLB allocation may happen before the architecture is able to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) * change memory attributes (e.g. mark the buffer decrypted). This function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) * lets the architecture apply those attributes later. It needs to be called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) * before the SWIOTLB memory is used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) void __init swiotlb_update_mem_attributes(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) void *vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) unsigned long bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) if (no_iotlb_memory || late_alloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) vaddr = phys_to_virt(io_tlb_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) memset(vaddr, 0, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) unsigned long i, bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) size_t alloc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) bytes = nslabs << IO_TLB_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) io_tlb_nslabs = nslabs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) io_tlb_start = __pa(tlb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) io_tlb_end = io_tlb_start + bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) * Allocate and initialize the free list array. This array is used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) * between io_tlb_start and io_tlb_end.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) alloc_size = PAGE_ALIGN(io_tlb_nslabs * sizeof(int));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) io_tlb_list = memblock_alloc(alloc_size, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) if (!io_tlb_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) __func__, alloc_size, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) alloc_size = PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) io_tlb_orig_addr = memblock_alloc(alloc_size, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) if (!io_tlb_orig_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) __func__, alloc_size, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233)
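	/*
	 * Each free-list entry records how many contiguous free slots are
	 * available from that index to the end of its IO_TLB_SEGSIZE segment.
	 */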
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) for (i = 0; i < io_tlb_nslabs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) io_tlb_list[i] = IO_TLB_SEGSIZE - io_tlb_offset(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) io_tlb_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) no_iotlb_memory = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) if (verbose)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) swiotlb_print_info();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) * Statically reserve bounce buffer space and initialize bounce buffer data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) * structures for the software IO TLB used to implement the DMA API.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) void __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) swiotlb_init(int verbose)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) size_t default_size = IO_TLB_DEFAULT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) unsigned char *vstart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) unsigned long bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) if (!io_tlb_nslabs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) bytes = io_tlb_nslabs << IO_TLB_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) /* Get IO TLB memory from the low pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) vstart = memblock_alloc_low(PAGE_ALIGN(bytes), PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) if (io_tlb_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) memblock_free_early(io_tlb_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) io_tlb_start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) pr_warn("Cannot allocate buffer");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) no_iotlb_memory = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) * Systems with larger DMA zones (those that don't support ISA) can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) * initialize the swiotlb later using the page allocator if needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) * This should be just like above, but with some error handling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) swiotlb_late_init_with_default_size(size_t default_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) unsigned long bytes, req_nslabs = io_tlb_nslabs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) unsigned char *vstart = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) unsigned int order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) if (!io_tlb_nslabs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) * Get IO TLB memory from the low pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) io_tlb_nslabs = SLABS_PER_PAGE << order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) bytes = io_tlb_nslabs << IO_TLB_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) if (vstart)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) order--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) if (!vstart) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) io_tlb_nslabs = req_nslabs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) if (order != get_order(bytes)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) pr_warn("only able to allocate %ld MB\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) (PAGE_SIZE << order) >> 20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) io_tlb_nslabs = SLABS_PER_PAGE << order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) free_pages((unsigned long)vstart, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) static void swiotlb_cleanup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) io_tlb_end = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) io_tlb_start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) io_tlb_nslabs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) max_segment = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) unsigned long i, bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) bytes = nslabs << IO_TLB_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) io_tlb_nslabs = nslabs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) io_tlb_start = virt_to_phys(tlb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) io_tlb_end = io_tlb_start + bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) memset(tlb, 0, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) * Allocate and initialize the free list array. This array is used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) * between io_tlb_start and io_tlb_end.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) get_order(io_tlb_nslabs * sizeof(int)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) if (!io_tlb_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) goto cleanup3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) io_tlb_orig_addr = (phys_addr_t *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) __get_free_pages(GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) get_order(io_tlb_nslabs *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) sizeof(phys_addr_t)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) if (!io_tlb_orig_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) goto cleanup4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) for (i = 0; i < io_tlb_nslabs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) io_tlb_list[i] = IO_TLB_SEGSIZE - io_tlb_offset(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) io_tlb_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) no_iotlb_memory = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) swiotlb_print_info();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) late_alloc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) cleanup4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) sizeof(int)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) io_tlb_list = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) cleanup3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) swiotlb_cleanup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391)
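/*
 * Undo swiotlb_init()/swiotlb_late_init_*(): free the bounce buffer and its
 * bookkeeping arrays using whichever allocator created them (page allocator
 * for a late init, memblock otherwise), then reset the global state.
 */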
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) void __init swiotlb_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) if (!io_tlb_orig_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) if (late_alloc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) free_pages((unsigned long)io_tlb_orig_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) sizeof(int)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) free_pages((unsigned long)phys_to_virt(io_tlb_start),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) get_order(io_tlb_nslabs << IO_TLB_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) memblock_free_late(__pa(io_tlb_orig_addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) memblock_free_late(__pa(io_tlb_list),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) memblock_free_late(io_tlb_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) swiotlb_cleanup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) * Bounce: copy the swiotlb buffer from or back to the original dma location
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) size_t size, enum dma_data_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) unsigned long pfn = PFN_DOWN(orig_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) unsigned char *vaddr = phys_to_virt(tlb_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) if (PageHighMem(pfn_to_page(pfn))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) /* The buffer does not have a mapping. Map it in and copy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) unsigned int offset = orig_addr & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) char *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) unsigned int sz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430)
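		/*
		 * Copy one page at a time: map each highmem page with
		 * kmap_atomic(), copy the chunk that falls within it, then
		 * advance to the next page.
		 */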
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) while (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) sz = min_t(size_t, PAGE_SIZE - offset, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) buffer = kmap_atomic(pfn_to_page(pfn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) if (dir == DMA_TO_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) memcpy(vaddr, buffer + offset, sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) memcpy(buffer + offset, vaddr, sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) kunmap_atomic(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) size -= sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) pfn++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) vaddr += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) } else if (dir == DMA_TO_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) memcpy(vaddr, phys_to_virt(orig_addr), size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) memcpy(phys_to_virt(orig_addr), vaddr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)
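/* Address of slot @idx within a bounce buffer that starts at @start. */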
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) #define slot_addr(start, idx) ((start) + ((idx) << IO_TLB_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) * Return the offset into an IO TLB slot required to preserve the device's minimum DMA alignment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) static inline unsigned long get_max_slots(unsigned long boundary_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) if (boundary_mask == ~0UL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) return 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) return nr_slots(boundary_mask + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
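/* Wrap a slot index back to the start of the pool once it passes the end. */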
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) static unsigned int wrap_index(unsigned int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) if (index >= io_tlb_nslabs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) return index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) * Find a suitable number of IO TLB entries that will fit this request and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) * allocate a buffer from the IO TLB pool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) static int find_slots(struct device *dev, phys_addr_t orig_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) size_t alloc_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) unsigned long boundary_mask = dma_get_seg_boundary(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) dma_addr_t tbl_dma_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) phys_to_dma_unencrypted(dev, io_tlb_start) & boundary_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) unsigned long max_slots = get_max_slots(boundary_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) unsigned int iotlb_align_mask =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) unsigned int nslots = nr_slots(alloc_size), stride;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) unsigned int index, wrap, count = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) BUG_ON(!nslots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) * For mappings with an alignment requirement, don't bother looping to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) * unaligned slots once we have found an aligned one. For allocations of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) * PAGE_SIZE or larger, only look for page-aligned allocations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) if (alloc_size >= PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) spin_lock_irqsave(&io_tlb_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) if (unlikely(nslots > io_tlb_nslabs - io_tlb_used))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) goto not_found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) index = wrap = wrap_index(ALIGN(io_tlb_index, stride));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) if ((slot_addr(tbl_dma_addr, index) & iotlb_align_mask) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) (orig_addr & iotlb_align_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) index = wrap_index(index + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) * If we find a slot that indicates 'nslots' contiguous buffers are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) * available, we allocate the buffers starting at that slot and mark
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) * the entries as '0', indicating they are unavailable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) if (!iommu_is_span_boundary(index, nslots,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) nr_slots(tbl_dma_addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) max_slots)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) if (io_tlb_list[index] >= nslots)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) goto found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) index = wrap_index(index + stride);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) } while (index != wrap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) not_found:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) spin_unlock_irqrestore(&io_tlb_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) found:
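	/*
	 * Mark the allocated slots as used ('0') and update the free counts
	 * of the free slots immediately preceding them within the same
	 * segment, so they reflect the new distance to the allocated range.
	 */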
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) for (i = index; i < index + nslots; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) io_tlb_list[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) for (i = index - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) io_tlb_list[i]; i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) io_tlb_list[i] = ++count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) * Update the search index so the next allocation starts after the slots just taken.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) if (index + nslots < io_tlb_nslabs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) io_tlb_index = index + nslots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) io_tlb_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) io_tlb_used += nslots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) spin_unlock_irqrestore(&io_tlb_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) return index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) size_t mapping_size, size_t alloc_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) enum dma_data_direction dir, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) unsigned int offset = swiotlb_align_offset(dev, orig_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) phys_addr_t tlb_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) if (no_iotlb_memory)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) if (mem_encrypt_active())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) if (mapping_size > alloc_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) dev_warn_once(dev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) mapping_size, alloc_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) return (phys_addr_t)DMA_MAPPING_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) index = find_slots(dev, orig_addr, alloc_size + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) if (index == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) if (!(attrs & DMA_ATTR_NO_WARN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) dev_warn_ratelimited(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) alloc_size, io_tlb_nslabs, io_tlb_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) return (phys_addr_t)DMA_MAPPING_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) * Save away the original address corresponding to each allocated slot so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) * that the sync and unmap paths can copy data back. Then bounce the data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) * to the device if needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) for (i = 0; i < nr_slots(alloc_size + offset); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) io_tlb_orig_addr[index + i] = slot_addr(orig_addr, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) tlb_addr = slot_addr(io_tlb_start, index) + offset;
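	/*
	 * Bounce the original data into the bounce buffer even for
	 * DMA_FROM_DEVICE mappings, unless the caller set DMA_ATTR_OVERWRITE
	 * to promise the whole buffer will be overwritten, so that a partial
	 * device write does not clobber the caller's data with stale
	 * bounce-buffer contents when it is copied back on unmap.
	 */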
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) (!(attrs & DMA_ATTR_OVERWRITE) || dir == DMA_TO_DEVICE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) dir == DMA_BIDIRECTIONAL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) return tlb_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) * tlb_addr is the physical address of the bounce buffer to unmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) size_t mapping_size, size_t alloc_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) enum dma_data_direction dir, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) unsigned int offset = swiotlb_align_offset(hwdev, tlb_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) int i, count, nslots = nr_slots(alloc_size + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) int index = (tlb_addr - offset - io_tlb_start) >> IO_TLB_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) phys_addr_t orig_addr = io_tlb_orig_addr[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) * First, sync the memory before unmapping the entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) if (orig_addr != INVALID_PHYS_ADDR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) * Return the buffer to the free list by setting the corresponding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) * entries to indicate the number of contiguous entries available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) * While returning the entries to the free list, we merge the entries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) * with slots below and above the pool being returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) spin_lock_irqsave(&io_tlb_lock, flags);
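	/*
	 * If the freed range does not reach the end of its IO_TLB_SEGSIZE
	 * segment, seed 'count' from the slot that follows so the range is
	 * merged with any free slots after it.
	 */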
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) count = io_tlb_list[index + nslots];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) * Step 1: return the slots to the free list, merging them with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) * succeeding slots
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) for (i = index + nslots - 1; i >= index; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) io_tlb_list[i] = ++count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) * Step 2: merge the returned slots with the preceding slots, if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) * available (non-zero)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) for (i = index - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && io_tlb_list[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) io_tlb_list[i] = ++count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) io_tlb_used -= nslots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) spin_unlock_irqrestore(&io_tlb_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660)
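/*
 * Sync an existing bounce-buffer mapping: copy device-written data back to
 * the original buffer (SYNC_FOR_CPU) or re-copy CPU-written data into the
 * bounce buffer (SYNC_FOR_DEVICE), according to the DMA direction.
 */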
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) size_t size, enum dma_data_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) enum dma_sync_target target)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) phys_addr_t orig_addr = io_tlb_orig_addr[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) if (orig_addr == INVALID_PHYS_ADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) orig_addr += (tlb_addr & (IO_TLB_SIZE - 1)) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) swiotlb_align_offset(hwdev, orig_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) switch (target) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) case SYNC_FOR_CPU:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) swiotlb_bounce(orig_addr, tlb_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) size, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) BUG_ON(dir != DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) case SYNC_FOR_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) swiotlb_bounce(orig_addr, tlb_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) size, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) BUG_ON(dir != DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693)
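/*
 * Note: drivers do not call swiotlb_map() directly.  A typical path (a
 * simplified sketch, not the literal call chain in every configuration) is:
 *
 *	dma_map_single(dev, buf, size, DMA_TO_DEVICE)
 *	  -> dma_direct_map_page()
 *	    -> swiotlb_map()	if bouncing is forced or the device cannot
 *				address the buffer directly
 */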
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) * Create a swiotlb mapping for the buffer at @paddr and, in case of DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) * to the device, copy the data into it as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) enum dma_data_direction dir, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) phys_addr_t swiotlb_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) swiotlb_force);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) return DMA_MAPPING_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) /* Ensure that the address returned is DMA'ble */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, size, dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) attrs | DMA_ATTR_SKIP_CPU_SYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) dev_WARN_ONCE(dev, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) "swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) return DMA_MAPPING_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) arch_sync_dma_for_device(swiotlb_addr, size, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) return dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
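/*
 * The largest contiguous mapping the bounce buffer can provide is one full
 * IO_TLB_SEGSIZE segment of IO_TLB_SIZE-byte slots.
 */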
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) size_t swiotlb_max_mapping_size(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) bool is_swiotlb_active(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) * When SWIOTLB is initialized, even if io_tlb_start points to physical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) * address zero, io_tlb_end surely doesn't.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) return io_tlb_end != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) #ifdef CONFIG_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) static int __init swiotlb_create_debugfs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) struct dentry *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) root = debugfs_create_dir("swiotlb", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) debugfs_create_ulong("io_tlb_nslabs", 0400, root, &io_tlb_nslabs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) debugfs_create_ulong("io_tlb_used", 0400, root, &io_tlb_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) late_initcall(swiotlb_create_debugfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) #endif