Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5 Plus boards

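The listing below appears to be the Sun 3x virtual DMA support file (arch/m68k/sun3x/dvma.c) as carried in this 5.10.110 tree.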
// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual DMA allocation
 *
 * (C) 1999 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
 *
 * 11/26/2000 -- disabled the existing code because it didn't work for
 * me in 2.4.  Replaced with a significantly more primitive version
 * similar to the sun3 code.  the old functionality was probably more
 * desirable, but....   -- Sam Creasey (sammy@oh.verio.com)
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/vmalloc.h>

#include <asm/sun3x.h>
#include <asm/dvma.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

/* IOMMU support */

#define IOMMU_ADDR_MASK            0x03ffe000
#define IOMMU_CACHE_INHIBIT        0x00000040
#define IOMMU_FULL_BLOCK           0x00000020
#define IOMMU_MODIFIED             0x00000010
#define IOMMU_USED                 0x00000008
#define IOMMU_WRITE_PROTECT        0x00000004
#define IOMMU_DT_MASK              0x00000003
#define IOMMU_DT_INVALID           0x00000000
#define IOMMU_DT_VALID             0x00000001
#define IOMMU_DT_BAD               0x00000002
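
/*
 * Each IOMMU page table entry packs the target physical page address
 * (the IOMMU_ADDR_MASK bits) together with control/status flags and a
 * two-bit descriptor-type field (IOMMU_DT_*).  Only the address mask,
 * the valid/invalid descriptor types, IOMMU_CACHE_INHIBIT and
 * IOMMU_FULL_BLOCK are referenced by the code below; the remaining
 * flags presumably describe the rest of the hardware entry format and
 * are unused here.
 */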


static volatile unsigned long *iommu_pte = (unsigned long *)SUN3X_IOMMU;


#define dvma_entry_paddr(index)		(iommu_pte[index] & IOMMU_ADDR_MASK)
#define dvma_entry_vaddr(index,paddr)	((index << DVMA_PAGE_SHIFT) |  \
					 (paddr & (DVMA_PAGE_SIZE-1)))
#if 0
#define dvma_entry_set(index,addr)	(iommu_pte[index] =            \
					    (addr & IOMMU_ADDR_MASK) | \
				             IOMMU_DT_VALID | IOMMU_CACHE_INHIBIT)
#else
#define dvma_entry_set(index,addr)	(iommu_pte[index] =            \
					    (addr & IOMMU_ADDR_MASK) | \
				             IOMMU_DT_VALID)
#endif
#define dvma_entry_clr(index)		(iommu_pte[index] = IOMMU_DT_INVALID)
#define dvma_entry_hash(addr)		((addr >> DVMA_PAGE_SHIFT) ^ \
					 ((addr & 0x03c00000) >>     \
						(DVMA_PAGE_SHIFT+4)))
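
/*
 * Helper macros over the flat IOMMU page table at SUN3X_IOMMU:
 * dvma_entry_paddr() reads the physical address stored in entry
 * 'index', dvma_entry_vaddr() rebuilds a DVMA address from an index
 * plus the page offset of 'paddr', dvma_entry_set() installs a valid
 * translation (the #if 0 variant would additionally mark it
 * cache-inhibited), and dvma_entry_clr() invalidates an entry.
 * dvma_entry_hash() is defined but not used in this file.
 */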

#ifdef DEBUG
/* code to print out a dvma mapping for debugging purposes */
void dvma_print (unsigned long dvma_addr)
{

	unsigned long index;

	index = dvma_addr >> DVMA_PAGE_SHIFT;

	pr_info("idx %lx dvma_addr %08lx paddr %08lx\n", index, dvma_addr,
		dvma_entry_paddr(index));
}
#endif


/* create a virtual mapping for a page assigned within the IOMMU
   so that the cpu can reach it easily */
inline int dvma_map_cpu(unsigned long kaddr,
			       unsigned long vaddr, int len)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	unsigned long end;
	int ret = 0;

	kaddr &= PAGE_MASK;
	vaddr &= PAGE_MASK;

	end = PAGE_ALIGN(vaddr + len);

	pr_debug("dvma: mapping kern %08lx to virt %08lx\n", kaddr, vaddr);
	pgd = pgd_offset_k(vaddr);
	p4d = p4d_offset(pgd, vaddr);
	pud = pud_offset(p4d, vaddr);

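	/*
	 * Walk the kernel page tables for the DVMA virtual range one
	 * PGDIR chunk (outer loop), one PMD chunk (middle loop) and one
	 * page (inner loop) at a time, allocating intermediate tables
	 * as needed and pointing each PTE at the corresponding kernel
	 * page, then flush the TLB once at the end.
	 */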
	do {
		pmd_t *pmd;
		unsigned long end2;

		if((pmd = pmd_alloc(&init_mm, pud, vaddr)) == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		if((end & PGDIR_MASK) > (vaddr & PGDIR_MASK))
			end2 = (vaddr + (PGDIR_SIZE-1)) & PGDIR_MASK;
		else
			end2 = end;

		do {
			pte_t *pte;
			unsigned long end3;

			if((pte = pte_alloc_kernel(pmd, vaddr)) == NULL) {
				ret = -ENOMEM;
				goto out;
			}

			if((end2 & PMD_MASK) > (vaddr & PMD_MASK))
				end3 = (vaddr + (PMD_SIZE-1)) & PMD_MASK;
			else
				end3 = end2;

			do {
				pr_debug("mapping %08lx phys to %08lx\n",
					 __pa(kaddr), vaddr);
				set_pte(pte, pfn_pte(virt_to_pfn(kaddr),
						     PAGE_KERNEL));
				pte++;
				kaddr += PAGE_SIZE;
				vaddr += PAGE_SIZE;
			} while(vaddr < end3);

		} while(vaddr < end2);

	} while(vaddr < end);

	flush_tlb_all();

 out:
	return ret;
}


inline int dvma_map_iommu(unsigned long kaddr, unsigned long baddr,
				 int len)
{
	unsigned long end, index;

	index = baddr >> DVMA_PAGE_SHIFT;
	end = ((baddr+len) >> DVMA_PAGE_SHIFT);

	if(len & ~DVMA_PAGE_MASK)
		end++;

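	/*
	 * Program one IOMMU entry per DVMA page in [baddr, baddr+len),
	 * pointing it at the physical page behind kaddr and marking it
	 * as a full block.  The commented-out lines are leftover
	 * reference-counting/debug hooks that are not compiled in.
	 */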
	for(; index < end ; index++) {
//		if(dvma_entry_use(index))
//			BUG();
//		pr_info("mapping pa %lx to ba %lx\n", __pa(kaddr),
//			index << DVMA_PAGE_SHIFT);

		dvma_entry_set(index, __pa(kaddr));

		iommu_pte[index] |= IOMMU_FULL_BLOCK;
//		dvma_entry_inc(index);

		kaddr += DVMA_PAGE_SIZE;
	}

#ifdef DEBUG
	for(index = (baddr >> DVMA_PAGE_SHIFT); index < end; index++)
		dvma_print(index << DVMA_PAGE_SHIFT);
#endif
	return 0;

}
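
/*
 * Usage sketch (illustrative only, not code from this file): a DVMA
 * allocator layered above these helpers is assumed to reserve a bus
 * address range and then establish both views of it -- the IOMMU
 * mapping for the device side and the kernel page-table mapping for
 * the CPU side -- roughly:
 *
 *	if (dvma_map_iommu(kaddr, baddr, len) < 0)
 *		return 0;
 *	if (dvma_map_cpu(kaddr, vaddr, len) < 0) {
 *		dvma_unmap_iommu(baddr, len);
 *		return 0;
 *	}
 *
 * where kaddr is the page-aligned kernel buffer, baddr the chosen bus
 * address and vaddr the CPU-visible DVMA address corresponding to
 * baddr.
 */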

void dvma_unmap_iommu(unsigned long baddr, int len)
{

	int index, end;


	index = baddr >> DVMA_PAGE_SHIFT;
	end = (DVMA_PAGE_ALIGN(baddr+len) >> DVMA_PAGE_SHIFT);

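	/*
	 * Invalidate every IOMMU entry covering [baddr, baddr+len); the
	 * #if 0 block below is an unused reference-counting variant.
	 */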
	for(; index < end ; index++) {
		pr_debug("freeing bus mapping %08x\n",
			 index << DVMA_PAGE_SHIFT);
#if 0
		if(!dvma_entry_use(index))
			pr_info("dvma_unmap freeing unused entry %04x\n",
				index);
		else
			dvma_entry_dec(index);
#endif
		dvma_entry_clr(index);
	}

}