Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards. The file below is the SPARC SRMMU page-table header, pgtsrmmu.h, as carried in this tree.

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * pgtsrmmu.h:  SRMMU page table defines and code.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef _SPARC_PGTSRMMU_H
#define _SPARC_PGTSRMMU_H

#include <asm/page.h>

#ifdef __ASSEMBLY__
#include <asm/thread_info.h>	/* TI_UWINMASK for WINDOW_FLUSH */
#endif

/* Number of contexts is implementation-dependent; 64k is the most we support */
#define SRMMU_MAX_CONTEXTS	65536

#define SRMMU_PTE_TABLE_SIZE		(PTRS_PER_PTE*4)
#define SRMMU_PMD_TABLE_SIZE		(PTRS_PER_PMD*4)
#define SRMMU_PGD_TABLE_SIZE		(PTRS_PER_PGD*4)

/* Definition of the values in the ET field of PTD's and PTE's */
#define SRMMU_ET_MASK         0x3
#define SRMMU_ET_INVALID      0x0
#define SRMMU_ET_PTD          0x1
#define SRMMU_ET_PTE          0x2
#define SRMMU_ET_REPTE        0x3 /* AIEEE, SuperSparc II reverse endian page! */
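
/*
 * Illustrative sketch (not part of the original header): a table entry
 * keeps its type in the two ET bits and its payload above them, so a PTD
 * for a page table at physical address 0x12345600 would be encoded as
 * (0x12345600 >> 4) | SRMMU_ET_PTD == 0x01234561.  The helper below is
 * hypothetical.
 */
#ifndef __ASSEMBLY__
static inline int srmmu_entry_type(unsigned long entry)
{
	return entry & SRMMU_ET_MASK;	/* INVALID, PTD, PTE or REPTE */
}
#endif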

/* Physical page extraction from PTP's and PTE's. */
#define SRMMU_CTX_PMASK    0xfffffff0
#define SRMMU_PTD_PMASK    0xfffffff0
#define SRMMU_PTE_PMASK    0xffffff00
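
/*
 * Illustrative sketch (not part of the original header), assuming the
 * usual SRMMU encoding in which an entry holds the physical address
 * shifted right by 4: recovering the (up to 36-bit) physical address
 * behind a PTE.  The helper name is hypothetical.
 */
#ifndef __ASSEMBLY__
static inline unsigned long long srmmu_pte_paddr(unsigned long pte)
{
	/* PTE bits [31:8] hold physical address bits [35:12]. */
	return (unsigned long long)(pte & SRMMU_PTE_PMASK) << 4;
}
#endif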

/* The pte non-page bits.  Some notes:
 * 1) cache, dirty, valid, and ref are frobbable
 *    for both supervisor and user pages.
 * 2) exec and write will only give the desired effect
 *    on user pages
 * 3) use priv and priv_readonly for changing the
 *    characteristics of supervisor ptes
 */
#define SRMMU_CACHE        0x80
#define SRMMU_DIRTY        0x40
#define SRMMU_REF          0x20
#define SRMMU_NOREAD       0x10
#define SRMMU_EXEC         0x08
#define SRMMU_WRITE        0x04
#define SRMMU_VALID        0x02 /* SRMMU_ET_PTE */
#define SRMMU_PRIV         0x1c
#define SRMMU_PRIV_RDONLY  0x18

#define SRMMU_CHG_MASK    (0xffffff00 | SRMMU_REF | SRMMU_DIRTY)
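
/*
 * Illustrative sketch (not part of the original header): SRMMU_CHG_MASK
 * preserves the physical page number plus the referenced and dirty bits
 * when a pte's protection is changed, in the style of a pte_modify()
 * implementation.  The helper name is hypothetical.
 */
#ifndef __ASSEMBLY__
static inline unsigned long srmmu_change_prot(unsigned long pte,
					      unsigned long newprot)
{
	return (pte & SRMMU_CHG_MASK) | newprot;
}
#endif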

/* SRMMU swap entry encoding
 *
 * We use 5 bits for the type and 20 for the offset.  This gives us
 * 32 swapfiles of 4GB each.  Encoding looks like:
 *
 * ooooooooooooooooooootttttRRRRRRR
 * fedcba9876543210fedcba9876543210
 *
 * The bottom 7 bits are reserved for protection and status bits, especially
 * PRESENT.
 */
#define SRMMU_SWP_TYPE_MASK	0x1f
#define SRMMU_SWP_TYPE_SHIFT	7
#define SRMMU_SWP_OFF_MASK	0xfffff
#define SRMMU_SWP_OFF_SHIFT	(SRMMU_SWP_TYPE_SHIFT + 5)

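/*
 * Illustrative sketch (not part of the original header): packing and
 * unpacking a swap entry with the fields above, in the style of the
 * __swp_entry()/__swp_type()/__swp_offset() helpers.  The names below
 * are hypothetical.
 */
#ifndef __ASSEMBLY__
static inline unsigned long srmmu_mk_swp_entry(unsigned long type,
					       unsigned long offset)
{
	return ((type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT) |
	       ((offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT);
}

static inline unsigned long srmmu_swp_type(unsigned long entry)
{
	return (entry >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static inline unsigned long srmmu_swp_offset(unsigned long entry)
{
	return (entry >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}
#endif
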
/* Some day I will implement true fine grained access bits for
 * user pages because the SRMMU gives us the capabilities to
 * enforce all the protection levels that vma's can have.
 * XXX But for now...
 */
#define SRMMU_PAGE_NONE    __pgprot(SRMMU_CACHE | \
				    SRMMU_PRIV | SRMMU_REF)
#define SRMMU_PAGE_SHARED  __pgprot(SRMMU_VALID | SRMMU_CACHE | \
				    SRMMU_EXEC | SRMMU_WRITE | SRMMU_REF)
#define SRMMU_PAGE_COPY    __pgprot(SRMMU_VALID | SRMMU_CACHE | \
				    SRMMU_EXEC | SRMMU_REF)
#define SRMMU_PAGE_RDONLY  __pgprot(SRMMU_VALID | SRMMU_CACHE | \
				    SRMMU_EXEC | SRMMU_REF)
#define SRMMU_PAGE_KERNEL  __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
				    SRMMU_DIRTY | SRMMU_REF)
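
/*
 * Illustrative note (not part of the original header): these coarse
 * protections back the usual vma combinations, e.g. a shared writable
 * user mapping gets SRMMU_PAGE_SHARED while a private writable one gets
 * SRMMU_PAGE_COPY (no SRMMU_WRITE), so the first store faults and can
 * trigger copy-on-write.
 */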

/* SRMMU Register addresses in ASI 0x4.  These are valid for all
 * current SRMMU implementations that exist.
 */
#define SRMMU_CTRL_REG           0x00000000
#define SRMMU_CTXTBL_PTR         0x00000100
#define SRMMU_CTX_REG            0x00000200
#define SRMMU_FAULT_STATUS       0x00000300
#define SRMMU_FAULT_ADDR         0x00000400

/* Flush the current task's user register windows to its stack: keep
 * issuing SAVEs (counting them in tmp1) until TI_UWINMASK shows no live
 * user windows, then unwind with an equal number of RESTOREs.
 */
#define WINDOW_FLUSH(tmp1, tmp2)					\
	mov	0, tmp1;						\
98:	ld	[%g6 + TI_UWINMASK], tmp2;				\
	orcc	%g0, tmp2, %g0;						\
	add	tmp1, 1, tmp1;						\
	bne	98b;							\
	 save	%sp, -64, %sp;						\
99:	subcc	tmp1, 1, tmp1;						\
	bne	99b;							\
	 restore %g0, %g0, %g0;
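
/* Illustrative usage note (not from this header): the macro is meant to
 * be expanded in assembly with two scratch registers, e.g.
 *
 *	WINDOW_FLUSH(%g4, %g5)
 *
 * on entry paths that need the user windows spilled to memory first.
 */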

#ifndef __ASSEMBLY__
extern unsigned long last_valid_pfn;

/* This makes sense. Honest it does - Anton */
/* XXX Yes but it's ugly as sin.  FIXME. -KMW */
extern void *srmmu_nocache_pool;
/* The nocache pool is a physically contiguous chunk remapped uncached at
 * SRMMU_NOCACHE_VADDR; these convert between that alias, its physical
 * address, and the cached linear-map address.
 */
#define __nocache_pa(VADDR) (((unsigned long)VADDR) - SRMMU_NOCACHE_VADDR + __pa((unsigned long)srmmu_nocache_pool))
#define __nocache_va(PADDR) (__va((unsigned long)PADDR) - (unsigned long)srmmu_nocache_pool + SRMMU_NOCACHE_VADDR)
#define __nocache_fix(VADDR) __va(__nocache_pa(VADDR))

/* Accessing the MMU control register. */
unsigned int srmmu_get_mmureg(void);
void srmmu_set_mmureg(unsigned long regval);
void srmmu_set_ctable_ptr(unsigned long paddr);
void srmmu_set_context(int context);
int srmmu_get_context(void);
unsigned int srmmu_get_fstatus(void);
unsigned int srmmu_get_faddr(void);

/* This is guaranteed on all SRMMU's. */
static inline void srmmu_flush_whole_tlb(void)
{
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
			     "r" (0x400),        /* Flush entire TLB!! */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");
}

/* Probe the MMU for the page table entry mapping addr.  Bits [11:8] of
 * the probe address select the probe type; 0x400 asks for an "entire"
 * probe, which walks the tables and returns the matching PTE.
 */
static inline int srmmu_get_pte(unsigned long addr)
{
	register unsigned long entry;

	__asm__ __volatile__("\n\tlda [%1] %2,%0\n\t" :
			     "=r" (entry) :
			     "r" ((addr & 0xfffff000) | 0x400), "i" (ASI_M_FLUSH_PROBE));
	return entry;
}
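
/*
 * Illustrative usage sketch (not part of the original header): probe for
 * the entry mapping a virtual address and test whether it is a valid
 * PTE.  The helper name is hypothetical.
 */
static inline int srmmu_addr_mapped(unsigned long vaddr)
{
	unsigned long entry = srmmu_get_pte(vaddr);

	return (entry & SRMMU_ET_MASK) == SRMMU_ET_PTE;
}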

#endif /* !(__ASSEMBLY__) */

#endif /* !(_SPARC_PGTSRMMU_H) */