// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-map-ops.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;
static int s390_iommu_strict;

static int zpci_refresh_global(struct zpci_dev *zdev)
{
	return zpci_refresh_trans((u64) zdev->fh << 32, zdev->start_dma,
				  zdev->iommu_pages * PAGE_SIZE);
}

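/*
 * dma_alloc_cpu_table - allocate one DMA region (translation) table
 *
 * Returns a ZPCI_TABLE_ENTRIES-sized table from the region-table slab
 * cache with all entries marked invalid, or NULL on allocation failure.
 * Allocation uses GFP_ATOMIC since callers may hold spinlocks.
 */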
unsigned long *dma_alloc_cpu_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
		*entry = ZPCI_TABLE_INVALID;
	return table;
}

static void dma_free_cpu_table(void *table)
{
	kmem_cache_free(dma_region_table_cache, table);
}

static unsigned long *dma_alloc_page_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
		*entry = ZPCI_PTE_INVALID;
	return table;
}

static void dma_free_page_table(void *table)
{
	kmem_cache_free(dma_page_table_cache, table);
}

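/*
 * dma_get_seg_table_origin - get or create the segment table for a
 * region-table entry
 *
 * If @entry is already valid, return the segment table it points to.
 * Otherwise allocate a new segment table, link it into @entry, mark the
 * entry valid and clear its protection bit. Returns NULL if the
 * allocation fails.
 */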
static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
{
	unsigned long *sto;

	if (reg_entry_isvalid(*entry))
		sto = get_rt_sto(*entry);
	else {
		sto = dma_alloc_cpu_table();
		if (!sto)
			return NULL;

		set_rt_sto(entry, sto);
		validate_rt_entry(entry);
		entry_clr_protected(entry);
	}
	return sto;
}

static unsigned long *dma_get_page_table_origin(unsigned long *entry)
{
	unsigned long *pto;

	if (reg_entry_isvalid(*entry))
		pto = get_st_pto(*entry);
	else {
		pto = dma_alloc_page_table();
		if (!pto)
			return NULL;
		set_st_pto(entry, pto);
		validate_st_entry(entry);
		entry_clr_protected(entry);
	}
	return pto;
}

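/*
 * dma_walk_cpu_trans - walk the CPU copy of the translation tables
 *
 * Walks (and creates, where missing) the three table levels for
 * @dma_addr: region table -> segment table -> page table, using the
 * rtx/sx/px indices derived from the address. Returns a pointer to the
 * page-table entry for @dma_addr, or NULL if a table allocation failed.
 */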
unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
	unsigned long *sto, *pto;
	unsigned int rtx, sx, px;

	rtx = calc_rtx(dma_addr);
	sto = dma_get_seg_table_origin(&rto[rtx]);
	if (!sto)
		return NULL;

	sx = calc_sx(dma_addr);
	pto = dma_get_page_table_origin(&sto[sx]);
	if (!pto)
		return NULL;

	px = calc_px(dma_addr);
	return &pto[px];
}

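/*
 * dma_update_cpu_trans - update one page-table entry
 *
 * Either invalidates @entry (ZPCI_PTE_INVALID in @flags) or points it at
 * @page_addr and marks it valid. The protection bit is set or cleared
 * according to ZPCI_TABLE_PROTECTED in @flags.
 */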
void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags)
{
	if (flags & ZPCI_PTE_INVALID) {
		invalidate_pt_entry(entry);
	} else {
		set_pt_pfaa(entry, page_addr);
		validate_pt_entry(entry);
	}

	if (flags & ZPCI_TABLE_PROTECTED)
		entry_set_protected(entry);
	else
		entry_clr_protected(entry);
}

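/*
 * __dma_update_trans - update the CPU translation tables for a range
 *
 * Maps or unmaps @size bytes starting at physical address @pa to/from
 * @dma_addr, page by page, under zdev->dma_table_lock. If a page-table
 * walk fails while validating entries, all entries set so far are rolled
 * back to invalid before returning -ENOMEM.
 */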
static int __dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
			      dma_addr_t dma_addr, size_t size, int flags)
{
	unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
	unsigned long irq_flags;
	unsigned long *entry;
	int i, rc = 0;

	if (!nr_pages)
		return -EINVAL;

	spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
	if (!zdev->dma_table) {
		rc = -EINVAL;
		goto out_unlock;
	}

	for (i = 0; i < nr_pages; i++) {
		entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
		if (!entry) {
			rc = -ENOMEM;
			goto undo_cpu_trans;
		}
		dma_update_cpu_trans(entry, page_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

undo_cpu_trans:
	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
		flags = ZPCI_PTE_INVALID;
		while (i-- > 0) {
			page_addr -= PAGE_SIZE;
			dma_addr -= PAGE_SIZE;
			entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
			if (!entry)
				break;
			dma_update_cpu_trans(entry, page_addr, flags);
		}
	}
out_unlock:
	spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
	return rc;
}

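/*
 * __dma_purge_tlb - flush the IOMMU TLB (RPCIT) for a DMA range
 *
 * Skips the refresh when it is not needed: for newly validated entries
 * when the device does not require TLB refresh, and for invalidated
 * entries in lazy (non-strict) mode. If the refresh fails with -ENOMEM
 * in lazy mode, a global refresh is issued and lazily freed IOVAs are
 * returned to the allocation bitmap so the hypervisor can free resources.
 */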
static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
			   size_t size, int flags)
{
	unsigned long irqflags;
	int ret;

	/*
	 * With zdev->tlb_refresh == 0, rpcit is not required to establish new
	 * translations when previously invalid translation-table entries are
	 * validated. With lazy unmap, rpcit is skipped for previously valid
	 * entries, but a global rpcit is then required before any address can
	 * be re-used, i.e. after each iommu bitmap wrap-around.
	 */
	if ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID) {
		if (!zdev->tlb_refresh)
			return 0;
	} else {
		if (!s390_iommu_strict)
			return 0;
	}

	ret = zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
				 PAGE_ALIGN(size));
	if (ret == -ENOMEM && !s390_iommu_strict) {
		/* enable the hypervisor to free some resources */
		if (zpci_refresh_global(zdev))
			goto out;

		spin_lock_irqsave(&zdev->iommu_bitmap_lock, irqflags);
		bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
			      zdev->lazy_bitmap, zdev->iommu_pages);
		bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
		spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, irqflags);
		ret = 0;
	}
out:
	return ret;
}

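/*
 * dma_update_trans - update the translation tables and flush the TLB
 *
 * Combines __dma_update_trans() and __dma_purge_tlb(). If the TLB flush
 * fails after entries were validated, the just-established mapping is
 * torn down again so the tables and the hardware view stay consistent.
 */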
static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
			    dma_addr_t dma_addr, size_t size, int flags)
{
	int rc;

	rc = __dma_update_trans(zdev, pa, dma_addr, size, flags);
	if (rc)
		return rc;

	rc = __dma_purge_tlb(zdev, dma_addr, size, flags);
	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
		__dma_update_trans(zdev, pa, dma_addr, size, ZPCI_PTE_INVALID);

	return rc;
}

void dma_free_seg_table(unsigned long entry)
{
	unsigned long *sto = get_rt_sto(entry);
	int sx;

	for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
		if (reg_entry_isvalid(sto[sx]))
			dma_free_page_table(get_st_pto(sto[sx]));

	dma_free_cpu_table(sto);
}

void dma_cleanup_tables(unsigned long *table)
{
	int rtx;

	if (!table)
		return;

	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
		if (reg_entry_isvalid(table[rtx]))
			dma_free_seg_table(table[rtx]);

	dma_free_cpu_table(table);
}

static unsigned long __dma_alloc_iommu(struct device *dev,
				       unsigned long start, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));

	return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
				start, size, zdev->start_dma >> PAGE_SHIFT,
				dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT),
				0);
}

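/*
 * dma_alloc_address - allocate a contiguous range of IOVA pages
 *
 * Searches the iommu bitmap starting at zdev->next_bit. If the search
 * fails, lazily freed addresses are reclaimed (after a global TLB flush
 * in non-strict mode) and the search wraps around to the start of the
 * aperture. Returns the DMA address of the first page, or
 * DMA_MAPPING_ERROR if no range of @size pages is available.
 */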
static dma_addr_t dma_alloc_address(struct device *dev, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long offset, flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
	if (offset == -1) {
		if (!s390_iommu_strict) {
			/* global flush before DMA addresses are reused */
			if (zpci_refresh_global(zdev))
				goto out_error;

			bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
				      zdev->lazy_bitmap, zdev->iommu_pages);
			bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
		}
		/* wrap-around */
		offset = __dma_alloc_iommu(dev, 0, size);
		if (offset == -1)
			goto out_error;
	}
	zdev->next_bit = offset + size;
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);

	return zdev->start_dma + offset * PAGE_SIZE;

out_error:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
	return DMA_MAPPING_ERROR;
}

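/*
 * dma_free_address - return a range of IOVA pages to the allocator
 *
 * In strict mode the pages are cleared in the iommu bitmap immediately;
 * in lazy mode they are only marked in the lazy bitmap and become
 * reusable after the next global TLB flush.
 */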
static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long flags, offset;

	offset = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	if (!zdev->iommu_bitmap)
		goto out;

	if (s390_iommu_strict)
		bitmap_clear(zdev->iommu_bitmap, offset, size);
	else
		bitmap_set(zdev->lazy_bitmap, offset, size);

out:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}

static inline void zpci_err_dma(unsigned long rc, unsigned long addr)
{
	struct {
		unsigned long rc;
		unsigned long addr;
	} __packed data = {rc, addr};

	zpci_err_hex(&data, sizeof(data));
}

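/*
 * s390_dma_map_pages - dma_map_ops ->map_page implementation
 *
 * Allocates an IOVA range covering the page range, establishes the
 * translation (write-protected for DMA_TO_DEVICE/DMA_NONE via
 * ZPCI_TABLE_PROTECTED), and returns the DMA address including the
 * sub-page offset. Returns DMA_MAPPING_ERROR on failure.
 */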
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long pa = page_to_phys(page) + offset;
	int flags = ZPCI_PTE_VALID;
	unsigned long nr_pages;
	dma_addr_t dma_addr;
	int ret;

	/* This rounds up number of pages based on size and offset */
	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
	dma_addr = dma_alloc_address(dev, nr_pages);
	if (dma_addr == DMA_MAPPING_ERROR) {
		ret = -ENOSPC;
		goto out_err;
	}

	/* Use rounded up size */
	size = nr_pages * PAGE_SIZE;

	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
	if (ret)
		goto out_free;

	atomic64_add(nr_pages, &zdev->mapped_pages);
	return dma_addr + (offset & ~PAGE_MASK);

out_free:
	dma_free_address(dev, dma_addr, nr_pages);
out_err:
	zpci_err("map error:\n");
	zpci_err_dma(ret, pa);
	return DMA_MAPPING_ERROR;
}

static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	int npages, ret;

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr = dma_addr & PAGE_MASK;
	ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
			       ZPCI_PTE_INVALID);
	if (ret) {
		zpci_err("unmap error:\n");
		zpci_err_dma(ret, dma_addr);
		return;
	}

	atomic64_add(npages, &zdev->unmapped_pages);
	dma_free_address(dev, dma_addr, npages);
}

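/*
 * s390_dma_alloc - dma_map_ops ->alloc implementation
 *
 * Allocates zeroed, page-aligned memory and maps it bidirectionally
 * through the IOMMU. Returns the buffer's address for CPU use and stores
 * the DMA address in *dma_handle.
 */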
static void *s390_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flag,
			    unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	struct page *page;
	unsigned long pa;
	dma_addr_t map;

	size = PAGE_ALIGN(size);
	page = alloc_pages(flag | __GFP_ZERO, get_order(size));
	if (!page)
		return NULL;

	pa = page_to_phys(page);
	map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, map)) {
		free_pages(pa, get_order(size));
		return NULL;
	}

	atomic64_add(size / PAGE_SIZE, &zdev->allocated_pages);
	if (dma_handle)
		*dma_handle = map;
	return (void *) pa;
}

static void s390_dma_free(struct device *dev, size_t size,
			  void *pa, dma_addr_t dma_handle,
			  unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));

	size = PAGE_ALIGN(size);
	atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
	s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, 0);
	free_pages((unsigned long) pa, get_order(size));
}

/* Map a segment into a contiguous dma address area */
static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			     size_t size, dma_addr_t *handle,
			     enum dma_data_direction dir)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	dma_addr_t dma_addr_base, dma_addr;
	int flags = ZPCI_PTE_VALID;
	struct scatterlist *s;
	unsigned long pa = 0;
	int ret;

	dma_addr_base = dma_alloc_address(dev, nr_pages);
	if (dma_addr_base == DMA_MAPPING_ERROR)
		return -ENOMEM;

	dma_addr = dma_addr_base;
	if (dir == DMA_NONE || dir == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
		pa = page_to_phys(sg_page(s));
		ret = __dma_update_trans(zdev, pa, dma_addr,
					 s->offset + s->length, flags);
		if (ret)
			goto unmap;

		dma_addr += s->offset + s->length;
	}
	ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags);
	if (ret)
		goto unmap;

	*handle = dma_addr_base;
	atomic64_add(nr_pages, &zdev->mapped_pages);

	return ret;

unmap:
	dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
			 ZPCI_PTE_INVALID);
	dma_free_address(dev, dma_addr_base, nr_pages);
	zpci_err("map error:\n");
	zpci_err_dma(ret, pa);
	return ret;
}

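/*
 * s390_dma_map_sg - dma_map_ops ->map_sg implementation
 *
 * Coalesces adjacent scatterlist elements into as few contiguous DMA
 * ranges as possible: elements are merged as long as the running range
 * stays page-aligned, the next element starts at offset 0, and the
 * combined size does not exceed the device's maximum segment size. Each
 * merged range is mapped with __s390_dma_map_sg(). Returns the number of
 * resulting DMA segments, or 0 on error (already mapped segments are
 * unmapped again).
 */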
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			   int nr_elements, enum dma_data_direction dir,
			   unsigned long attrs)
{
	struct scatterlist *s = sg, *start = sg, *dma = sg;
	unsigned int max = dma_get_max_seg_size(dev);
	unsigned int size = s->offset + s->length;
	unsigned int offset = s->offset;
	int count = 0, i;

	for (i = 1; i < nr_elements; i++) {
		s = sg_next(s);

		s->dma_address = DMA_MAPPING_ERROR;
		s->dma_length = 0;

		if (s->offset || (size & ~PAGE_MASK) ||
		    size + s->length > max) {
			if (__s390_dma_map_sg(dev, start, size,
					      &dma->dma_address, dir))
				goto unmap;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count++;
		}
		size += s->length;
	}
	if (__s390_dma_map_sg(dev, start, size, &dma->dma_address, dir))
		goto unmap;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count + 1;
unmap:
	for_each_sg(sg, s, count, i)
		s390_dma_unmap_pages(dev, sg_dma_address(s), sg_dma_len(s),
				     dir, attrs);

	return 0;
}

static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
			      int nr_elements, enum dma_data_direction dir,
			      unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		if (s->dma_length)
			s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
					     dir, attrs);
		s->dma_address = 0;
		s->dma_length = 0;
	}
}

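/*
 * zpci_dma_init_device - set up DMA translation for a zPCI device
 *
 * Allocates the root DMA translation table and the IOVA allocation
 * bitmap(s), clamps the usable DMA aperture, and registers the table
 * with the hardware via zpci_register_ioat(). Returns 0 on success or a
 * negative errno, cleaning up partial allocations on failure.
 */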
int zpci_dma_init_device(struct zpci_dev *zdev)
{
	int rc;

	/*
	 * At this point, if the device is part of an IOMMU domain, this would
	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
	 * simultaneous access via IOMMU and DMA API. So let's issue a warning.
	 */
	WARN_ON(zdev->s390_domain);

	spin_lock_init(&zdev->iommu_bitmap_lock);
	spin_lock_init(&zdev->dma_table_lock);

	zdev->dma_table = dma_alloc_cpu_table();
	if (!zdev->dma_table) {
		rc = -ENOMEM;
		goto out;
	}

	/*
	 * Restrict the iommu bitmap size to the minimum of the following:
	 * - main memory size
	 * - 3-level pagetable address limit minus start_dma offset
	 * - DMA address range allowed by the hardware (clp query pci fn)
	 *
	 * Also set zdev->end_dma to the actual end address of the usable
	 * range, instead of the theoretical maximum as reported by hardware.
	 */
	zdev->start_dma = PAGE_ALIGN(zdev->start_dma);
	zdev->iommu_size = min3((u64) high_memory,
				ZPCI_TABLE_SIZE_RT - zdev->start_dma,
				zdev->end_dma - zdev->start_dma + 1);
	zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
	zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
	zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
	if (!zdev->iommu_bitmap) {
		rc = -ENOMEM;
		goto free_dma_table;
	}
	if (!s390_iommu_strict) {
		zdev->lazy_bitmap = vzalloc(zdev->iommu_pages / 8);
		if (!zdev->lazy_bitmap) {
			rc = -ENOMEM;
			goto free_bitmap;
		}

	}
	rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
				(u64) zdev->dma_table);
	if (rc)
		goto free_bitmap;

	return 0;
free_bitmap:
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
	vfree(zdev->lazy_bitmap);
	zdev->lazy_bitmap = NULL;
free_dma_table:
	dma_free_cpu_table(zdev->dma_table);
	zdev->dma_table = NULL;
out:
	return rc;
}

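/*
 * zpci_dma_exit_device - tear down DMA translation for a zPCI device
 *
 * Unregisters the translation table from the hardware, then frees the
 * table hierarchy and the IOVA bitmaps. If unregistering fails, the
 * resources are left in place rather than freed while the hardware may
 * still reference them.
 */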
void zpci_dma_exit_device(struct zpci_dev *zdev)
{
	/*
	 * At this point, if the device is part of an IOMMU domain, this would
	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
	 * simultaneous access via IOMMU and DMA API. So let's issue a warning.
	 */
	WARN_ON(zdev->s390_domain);

	if (zpci_unregister_ioat(zdev, 0))
		return;

	dma_cleanup_tables(zdev->dma_table);
	zdev->dma_table = NULL;
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
	vfree(zdev->lazy_bitmap);
	zdev->lazy_bitmap = NULL;

	zdev->next_bit = 0;
}

static int __init dma_alloc_cpu_table_caches(void)
{
	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
					ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
					0, NULL);
	if (!dma_region_table_cache)
		return -ENOMEM;

	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
					ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
					0, NULL);
	if (!dma_page_table_cache) {
		kmem_cache_destroy(dma_region_table_cache);
		return -ENOMEM;
	}
	return 0;
}

int __init zpci_dma_init(void)
{
	return dma_alloc_cpu_table_caches();
}

void zpci_dma_exit(void)
{
	kmem_cache_destroy(dma_page_table_cache);
	kmem_cache_destroy(dma_region_table_cache);
}

const struct dma_map_ops s390_pci_dma_ops = {
	.alloc		= s390_dma_alloc,
	.free		= s390_dma_free,
	.map_sg		= s390_dma_map_sg,
	.unmap_sg	= s390_dma_unmap_sg,
	.map_page	= s390_dma_map_pages,
	.unmap_page	= s390_dma_unmap_pages,
	.mmap		= dma_common_mmap,
	.get_sgtable	= dma_common_get_sgtable,
	.alloc_pages	= dma_common_alloc_pages,
	.free_pages	= dma_common_free_pages,
	/* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_pci_dma_ops);

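/*
 * Kernel command line: "s390_iommu=strict" selects strict TLB flushing,
 * i.e. unmapped IOVAs are flushed and released immediately instead of
 * lazily on bitmap wrap-around.
 */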
static int __init s390_iommu_setup(char *str)
{
	if (!strcmp(str, "strict"))
		s390_iommu_strict = 1;
	return 1;
}

__setup("s390_iommu=", s390_iommu_setup);