// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/arch/alpha/kernel/pci_iommu.c
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/dma-map-ops.h>
#include <linux/iommu-helper.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"


#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)	printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)	printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif

#define DEBUG_NODIRECT 0

#define ISA_DMA_MASK		0x00ffffff

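/* Illustrative example (assuming Alpha's 8 KB pages, PAGE_SHIFT == 13):
   for a page at physical address 0x40000 the PFN is 0x40000 >> 13 = 0x20,
   and mk_iommu_pte() below produces (0x40000 >> 12) | 1 = 0x41, i.e. the
   PFN shifted up by one bit with the valid bit set in bit 0.  The chipset
   scatter-gather hardware walks these entries to translate bus addresses
   back to RAM.  */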
static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT-1)) | 1;
}

/* Return the minimum of MAX or the first power of two larger
   than main memory.  */

unsigned long
size_for_memory(unsigned long max)
{
	unsigned long mem = max_low_pfn << PAGE_SHIFT;
	if (mem < max)
		max = roundup_pow_of_two(mem);
	return max;
}
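/* Example (illustrative): with 384 MB of low memory and max = 1 GB this
   returns roundup_pow_of_two(384 MB) = 512 MB; with 2 GB of memory the
   1 GB cap itself is returned.  This lets a caller clamp a DMA window to
   installed memory while never exceeding MAX.  */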
struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena.  */
	if (align < mem_size)
		align = mem_size;
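	/* Worked example (illustrative, assuming 8 KB pages and 8-byte PTEs):
	   a 1 GB window covers 1 GB / 8 KB = 128K pages, so the PTE array
	   needs 128K * 8 = 1 MB, which is exactly
	   window_size / (PAGE_SIZE / sizeof(unsigned long)).  Since the
	   chipset concatenates rather than adds when indexing the window,
	   the array is also aligned to at least that 1 MB size.  */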


#ifdef CONFIG_DISCONTIGMEM

	arena = memblock_alloc_node(sizeof(*arena), align, nid);
	if (!NODE_DATA(nid) || !arena) {
		printk("%s: couldn't allocate arena from node %d\n"
		       " falling back to system-wide allocation\n",
		       __func__, nid);
		arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
		if (!arena)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(*arena));
	}
	arena->ptes = memblock_alloc_node(mem_size, align, nid);
	if (!NODE_DATA(nid) || !arena->ptes) {
		printk("%s: couldn't allocate arena ptes from node %d\n"
		       " falling back to system-wide allocation\n",
		       __func__, nid);
		arena->ptes = memblock_alloc(mem_size, align);
		if (!arena->ptes)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, mem_size, align);
	}

#else /* CONFIG_DISCONTIGMEM */

	arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
	if (!arena)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(*arena));
	arena->ptes = memblock_alloc(mem_size, align);
	if (!arena->ptes)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, mem_size, align);

#endif /* CONFIG_DISCONTIGMEM */

	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs.  */
	arena->align_entry = 1;

	return arena;
}

struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
		unsigned long window_size, unsigned long align)
{
	return iommu_arena_new_node(0, hose, base, window_size, align);
}

/* Must be called with the arena lock held */
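/* Illustrative note: the search below looks for N consecutive free PTEs
   whose starting index is aligned to (mask + 1) entries.  For example, a
   request for n = 4 pages with mask = 7 only considers starting indices
   0, 8, 16, ... and restarts its run whenever a used entry is found.  One
   full wrap of the arena (with a TLB flush) is attempted before giving up.  */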
static long
iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
		       long n, long mask)
{
	unsigned long *ptes;
	long i, p, nent;
	int pass = 0;
	unsigned long base;
	unsigned long boundary_size;

	base = arena->dma_base >> PAGE_SHIFT;
	boundary_size = dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT);

	/* Search forward for the first mask-aligned sequence of N free ptes */
	ptes = arena->ptes;
	nent = arena->size >> PAGE_SHIFT;
	p = ALIGN(arena->next_entry, mask + 1);
	i = 0;

again:
	while (i < n && p+i < nent) {
		if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
			p = ALIGN(p + 1, mask + 1);
			goto again;
		}

		if (ptes[p+i])
			p = ALIGN(p + i + 1, mask + 1), i = 0;
		else
			i = i + 1;
	}

	if (i < n) {
		if (pass < 1) {
			/*
			 * Reached the end.  Flush the TLB and restart
			 * the search from the beginning.
			 */
			alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

			pass++;
			p = 0;
			i = 0;
			goto again;
		} else
			return -1;
	}

	/* Success. It's the responsibility of the caller to mark them
	   in use before releasing the lock */
	return p;
}

static long
iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
		  unsigned int align)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p, mask;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes */
	ptes = arena->ptes;
	mask = max(align, arena->align_entry) - 1;
	p = iommu_arena_find_pages(dev, arena, n, mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all in use, ie not zero and invalid
	   for the iommu tlb that could load them from under us.
	   The chip specific bits will fill this in with something
	   kosher when we return.  */
	for (i = 0; i < n; ++i)
		ptes[p+i] = IOMMU_INVALID_PTE;

	arena->next_entry = p + n;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
	unsigned long *p;
	long i;

	p = arena->ptes + ofs;
	for (i = 0; i < n; ++i)
		p[i] = 0;
}

/*
 * True if the machine supports DAC addressing, and DEV can
 * make use of it given MASK.
 */
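/* "DAC" refers to PCI dual address cycles, i.e. 64-bit bus addresses formed
 * by adding alpha_mv.pci_dac_offset to the physical address (see
 * pci_map_single_1() below).  A zero offset means the platform provides no
 * such window, so DAC addressing is refused outright.
 */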
static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok = 1;

	/* If this is not set, the machine doesn't support DAC at all.  */
	if (dac_offset == 0)
		ok = 0;

	/* The device has to be able to address our DAC bit.  */
	if ((dac_offset & dev->dma_mask) != dac_offset)
		ok = 0;

	/* If both conditions above are met, we are fine. */
	DBGA("pci_dac_dma_supported %s from %ps\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}

/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */
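/* Summary of the strategy below: (1) if the buffer already lies inside the
   direct-map window and the device's dma_mask can reach it, return the
   direct-mapped bus address; (2) otherwise, if the caller allowed DAC,
   return physical address + pci_dac_offset; (3) otherwise allocate
   scatter-gather PTEs from the hose's arena and return an address inside
   that window.  */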

static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
		 int dac_allowed)
{
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned long paddr;
	dma_addr_t ret;
	unsigned int align = 0;
	struct device *dev = pdev ? &pdev->dev : NULL;

	paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
	/* First check to see if we can use the direct map window.  */
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %ps\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
#endif

	/* Next, use DAC if selected earlier.  */
	if (dac_allowed) {
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %ps\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}

	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support sg mapping, and, since we tried to
	   use direct_map above, it now must be considered an error. */
	if (! alpha_mv.mv_pci_tbi) {
		printk_once(KERN_WARNING "pci_map_single: no HW sg\n");
		return DMA_MAPPING_ERROR;
	}

	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	npages = iommu_num_pages(paddr, size, PAGE_SIZE);

	/* Force allocation to 64KB boundary for ISA bridges. */
	if (pdev && pdev == isa_bridge)
		align = 8;
	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
	if (dma_ofs < 0) {
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return DMA_MAPPING_ERROR;
	}

	paddr &= PAGE_MASK;
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %ps\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));

	return ret;
}

/* Helper for generic DMA-mapping functions. */
static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
{
	if (dev && dev_is_pci(dev))
		return to_pci_dev(dev);

	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
	   BUG() otherwise. */
	BUG_ON(!isa_bridge);

	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
	   bridge is bus master then). */
	if (!dev || !dev->dma_mask || !*dev->dma_mask)
		return isa_bridge;

	/* For EISA bus masters, return isa_bridge (it might have smaller
	   dma_mask due to wiring limitations). */
	if (*dev->dma_mask >= isa_bridge->dma_mask)
		return isa_bridge;

	/* This assumes ISA bus master with dma_mask 0xffffff. */
	return NULL;
}

static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	int dac_allowed;

	BUG_ON(dir == PCI_DMA_NONE);

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
				size, dac_allowed);
}

/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */

static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	unsigned long flags;
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	struct pci_iommu_arena *arena;
	long dma_ofs, npages;

	BUG_ON(dir == PCI_DMA_NONE);

	if (dma_addr >= __direct_map_base
	    && dma_addr < __direct_map_base + __direct_map_size) {
		/* Nothing to do.  */

		DBGA2("pci_unmap_single: direct [%llx,%zx] from %ps\n",
		      dma_addr, size, __builtin_return_address(0));

		return;
	}

	if (dma_addr > 0xffffffff) {
		DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %ps\n",
		      dma_addr, size, __builtin_return_address(0));
		return;
	}

	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;

	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %llx "
		       " base %llx size %x\n",
		       dma_addr, arena->dma_base, arena->size);
		return;
	}

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);

	spin_lock_irqsave(&arena->lock, flags);

	iommu_arena_free(arena, dma_ofs, npages);

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if (dma_ofs >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %ps\n",
	      dma_addr, size, npages, __builtin_return_address(0));
}

/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */
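/* Note on the retry below: the first attempt deliberately clears GFP_DMA.
   If the resulting pages cannot be reached through the direct window and
   there is no IOMMU (no mv_pci_tbi hook), the allocation is retried once
   with GFP_DMA so the buffer lands in the low, ISA-reachable DMA zone.  */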

static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_addrp, gfp_t gfp,
				      unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	void *cpu_addr;
	long order = get_order(size);

	gfp &= ~GFP_DMA;

try_again:
	cpu_addr = (void *)__get_free_pages(gfp | __GFP_ZERO, order);
	if (! cpu_addr) {
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %ps\n",
		       __builtin_return_address(0));
		/* ??? Really atomic allocation?  Otherwise we could play
		   with vmalloc and sg if we can't find contiguous memory.  */
		return NULL;
	}
	memset(cpu_addr, 0, size);

	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == DMA_MAPPING_ERROR) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
			return NULL;
		/* The address doesn't fit required mask and we
		   do not have iommu. Try again with GFP_DMA. */
		gfp |= GFP_DMA;
		goto try_again;
	}

	DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %ps\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));

	return cpu_addr;
}
/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */

static void alpha_pci_free_coherent(struct device *dev, size_t size,
				    void *cpu_addr, dma_addr_t dma_addr,
				    unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%llx,%zx] from %ps\n",
	      dma_addr, size, __builtin_return_address(0));
}

/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
	0   : Followers all physically adjacent.
	1   : Followers all virtually adjacent.
	-1  : Not leader, physically adjacent to previous.
	-2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergable followers.  */
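/* Classification example (illustrative): three 8 KB buffers where the second
   starts exactly where the first ends physically, and the third sits at an
   unrelated, non page-aligned address, end up as

	sg[0].dma_address = 0	(leader; follower physically adjacent)
	sg[1].dma_address = -1	(merged into the leader)
	sg[2].dma_address = 0	(new leader)

   with sg[0].dma_length = 16 KB and sg[2].dma_length = 8 KB.  Had the break
   fallen exactly on page boundaries with an IOMMU available, sg[2] would
   instead be tagged -2 and the leader flagged 1 (virtually adjacent).  */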

#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))

static void
sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
	    int virt_ok)
{
	unsigned long next_paddr;
	struct scatterlist *leader;
	long leader_flag, leader_length;
	unsigned int max_seg_size;

	leader = sg;
	leader_flag = 0;
	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	/* we will not merge sg entries without a device. */
	max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);
		len = sg->length;

		if (leader_length + len > max_seg_size)
			goto new_segment;

		if (next_paddr == addr) {
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
new_segment:
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}

/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */

static int
sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it.  */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA(" sg_fill: [%p,%lx] -> direct %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
#endif

	/* If physically contiguous and DAC is available, use it.  */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA(" sg_fill: [%p,%lx] -> DAC %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}

	/* Otherwise, we'll use the iommu to make the pages virtually
	   contiguous.  */

	paddr &= ~PAGE_MASK;
	npages = iommu_num_pages(paddr, size, PAGE_SIZE);
	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  */
		sg_classify(dev, leader, end, 0);
		return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
	}

	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA(" sg_fill: [%p,%lx] -> sg %llx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);

	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes.  */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
#if DEBUG_ALLOC > 0
		struct scatterlist *last_sg = sg;
#endif

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg = sg_next(sg);
		}

		npages = iommu_num_pages(paddr, size, PAGE_SIZE);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
		DBGA(" (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA(" (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
#endif
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}

static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
			    int nents, enum dma_data_direction dir,
			    unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;

	BUG_ON(dir == PCI_DMA_NONE);

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) /* Fast path single entry scatterlists. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) if (nents == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) sg->dma_length = sg->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) sg->dma_address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) sg->length, dac_allowed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) return sg->dma_address != DMA_MAPPING_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) start = sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) end = sg + nents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) /* First, prepare information about the entries. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) /* Second, figure out where we're going to map things. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) if (alpha_mv.mv_pci_tbi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) hose = pdev ? pdev->sysdata : pci_isa_hose;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) arena = hose->sg_pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) if (!arena || arena->dma_base + arena->size - 1 > max_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) arena = hose->sg_isa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) max_dma = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) arena = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) hose = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) /* Third, iterate over the scatterlist leaders and allocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) dma space as needed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) for (out = sg; sg < end; ++sg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) if ((int) sg->dma_address < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) out++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) /* Mark the end of the list for pci_unmap_sg. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) if (out < end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) out->dma_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) if (out - start == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) DBGA("pci_map_sg: %ld entries\n", out - start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) return out - start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) printk(KERN_WARNING "pci_map_sg failed: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) "could not allocate dma page tables\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) /* Some allocation failed while mapping the scatterlist
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) entries. Unmap them now. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) if (out > start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) pci_unmap_sg(pdev, start, out - start, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) /* Unmap a set of streaming mode DMA translations. Again, cpu read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) rules concerning calls here are the same as for pci_unmap_single()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) above. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) int nents, enum dma_data_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) struct pci_dev *pdev = alpha_gendev_to_pci(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) struct pci_controller *hose;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) struct pci_iommu_arena *arena;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) struct scatterlist *end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) dma_addr_t max_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) dma_addr_t fbeg, fend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) BUG_ON(dir == PCI_DMA_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) if (! alpha_mv.mv_pci_tbi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) hose = pdev ? pdev->sysdata : pci_isa_hose;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) arena = hose->sg_pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) if (!arena || arena->dma_base + arena->size - 1 > max_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) arena = hose->sg_isa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) fbeg = -1, fend = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) spin_lock_irqsave(&arena->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) for (end = sg + nents; sg < end; ++sg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) dma_addr_t addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) long npages, ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) dma_addr_t tend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) addr = sg->dma_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) size = sg->dma_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) if (!size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) if (addr > 0xffffffff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) /* It's a DAC address -- nothing to do. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) DBGA(" (%ld) DAC [%llx,%zx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) sg - end + nents, addr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if (addr >= __direct_map_base
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) && addr < __direct_map_base + __direct_map_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) /* Nothing to do. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) DBGA(" (%ld) direct [%llx,%zx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) sg - end + nents, addr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) DBGA(" (%ld) sg [%llx,%zx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) sg - end + nents, addr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) npages = iommu_num_pages(addr, size, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) iommu_arena_free(arena, ofs, npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) tend = addr + size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) if (fbeg > addr) fbeg = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) if (fend < tend) fend = tend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) 	/* If we're freeing ptes above the `next_entry' pointer (they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) 	   may have snuck back into the TLB since the last wrap flush),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) 	   we need to flush the TLB before those entries can be reused. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) alpha_mv.mv_pci_tbi(hose, fbeg, fend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) spin_unlock_irqrestore(&arena->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) /* Return whether the given PCI device DMA address mask can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) supported properly. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
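/* This hook is reached through the generic DMA API: a driver calling,
   for example, dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) has the mask
   validated here via the .dma_supported entry of alpha_pci_ops below. */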
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) static int alpha_pci_supported(struct device *dev, u64 mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) struct pci_dev *pdev = alpha_gendev_to_pci(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) struct pci_controller *hose;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) struct pci_iommu_arena *arena;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) 	/* If there exists a direct map, and the mask covers either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) 	   the entire direct-mapped space or the total system memory as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) 	   offset by the map base, the direct map can serve this device. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (__direct_map_size != 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) && (__direct_map_base + __direct_map_size - 1 <= mask ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) __direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) /* Check that we have a scatter-gather arena that fits. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) hose = pdev ? pdev->sysdata : pci_isa_hose;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) arena = hose->sg_isa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (arena && arena->dma_base + arena->size - 1 <= mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) arena = hose->sg_pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) if (arena && arena->dma_base + arena->size - 1 <= mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) 	/* As a last resort, try ZONE_DMA. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) * AGP GART extensions to the IOMMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) */
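
/* Reserve a run of pg_count consecutive IOMMU pages in the arena,
   honouring align_mask.  The entries are stamped IOMMU_RESERVED_PTE
   (nonzero but invalid) so the hardware cannot pick them up before
   iommu_bind() installs real translations.  Returns the starting page
   index, -EINVAL if there is no arena, or -1 if no space was found. */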
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) unsigned long *ptes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) long i, p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (!arena) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) spin_lock_irqsave(&arena->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) /* Search for N empty ptes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) ptes = arena->ptes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) if (p < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) spin_unlock_irqrestore(&arena->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) 	/* Success.  Mark them all reserved (i.e. nonzero but invalid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) 	   so the IOMMU TLB cannot load them out from under us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) 	   They will be filled in with valid bits by iommu_bind(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) for (i = 0; i < pg_count; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) ptes[p+i] = IOMMU_RESERVED_PTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) arena->next_entry = p + pg_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) spin_unlock_irqrestore(&arena->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
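/* Give back a range obtained from iommu_reserve().  Every entry must
   still be in the reserved state (i.e. not currently bound); otherwise
   -EBUSY is returned and nothing is freed. */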
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) unsigned long *ptes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (!arena) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) ptes = arena->ptes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) /* Make sure they're all reserved first... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) for(i = pg_start; i < pg_start + pg_count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (ptes[i] != IOMMU_RESERVED_PTE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) iommu_arena_free(arena, pg_start, pg_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
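/* Point a previously reserved range at the given pages.  Every entry
   must still hold IOMMU_RESERVED_PTE; if any has already been bound,
   the whole call fails with -EBUSY and no translations are installed. */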
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) struct page **pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) unsigned long *ptes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) long i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (!arena) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) spin_lock_irqsave(&arena->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) ptes = arena->ptes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) for(j = pg_start; j < pg_start + pg_count; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) if (ptes[j] != IOMMU_RESERVED_PTE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) spin_unlock_irqrestore(&arena->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) for(i = 0, j = pg_start; i < pg_count; i++, j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) ptes[j] = mk_iommu_pte(page_to_phys(pages[i]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) spin_unlock_irqrestore(&arena->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
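/* Drop the translations installed by iommu_bind(), returning the
   entries to the reserved state.  The range itself stays allocated
   until iommu_release() is called. */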
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) unsigned long *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (!arena) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) p = arena->ptes + pg_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) for(i = 0; i < pg_count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) p[i] = IOMMU_RESERVED_PTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) }
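
/*
 * A minimal usage sketch of the four calls above, roughly how an AGP
 * GART backend might drive them.  Illustrative only (kept under #if 0,
 * not built); the example_* names, the arena pointer and the page
 * array are assumptions, not part of this file's API.
 */
#if 0
static long example_gart_map(struct pci_iommu_arena *arena,
			     struct page **pages, long npages)
{
	long ofs;

	/* Set aside npages entries; no extra alignment requested. */
	ofs = iommu_reserve(arena, npages, 0);
	if (ofs < 0)
		return ofs;

	/* Install the translations; back out the reservation on failure. */
	if (iommu_bind(arena, ofs, npages, pages) < 0) {
		iommu_release(arena, ofs, npages);
		return -EBUSY;
	}

	return ofs;	/* page index within the arena */
}

static void example_gart_unmap(struct pci_iommu_arena *arena,
			       long ofs, long npages)
{
	/* Invalidate the translations first, then free the range. */
	iommu_unbind(arena, ofs, npages);
	iommu_release(arena, ofs, npages);
}
#endif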
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
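/*
 * The DMA operations table for Alpha PCI devices.  The generic DMA
 * mapping layer dispatches dma_map_*, dma_alloc_coherent() and friends
 * through these entries; on Alpha the table is normally handed out by
 * get_arch_dma_ops() in <asm/dma-mapping.h>.
 */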
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) const struct dma_map_ops alpha_pci_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) .alloc = alpha_pci_alloc_coherent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) .free = alpha_pci_free_coherent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) .map_page = alpha_pci_map_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) .unmap_page = alpha_pci_unmap_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) .map_sg = alpha_pci_map_sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) .unmap_sg = alpha_pci_unmap_sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) .dma_supported = alpha_pci_supported,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) .mmap = dma_common_mmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) .get_sgtable = dma_common_get_sgtable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) .alloc_pages = dma_common_alloc_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) .free_pages = dma_common_free_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) EXPORT_SYMBOL(alpha_pci_ops);