Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PAGE_H
#define _ASM_POWERPC_PAGE_H

/*
 * Copyright (C) 2001,2005 IBM Corporation.
 */

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/kernel.h>
#else
#include <asm/types.h>
#endif
#include <asm/asm-const.h>

/*
 * On regular PPC32 the page size is 4K (but we support 4K/16K/64K/256K pages
 * on PPC44x and 4K/16K on 8xx). For PPC64 we support either 4K or 64K software
 * page size. When using 64K pages, however, whether we are really supporting
 * 64K pages in HW or not is irrelevant to these definitions.
 */
#define PAGE_SHIFT		CONFIG_PPC_PAGE_SHIFT
#define PAGE_SIZE		(ASM_CONST(1) << PAGE_SHIFT)
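
/*
 * Illustrative example, assuming the common CONFIG_PPC_PAGE_SHIFT=12:
 *   PAGE_SIZE = ASM_CONST(1) << 12 = 0x1000 (4K)
 * ASM_CONST() exists so the same constant works from both C (where it
 * gets a UL suffix) and assembly (where a UL suffix would not parse).
 */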

#ifndef __ASSEMBLY__
#ifndef CONFIG_HUGETLB_PAGE
#define HPAGE_SHIFT PAGE_SHIFT
#elif defined(CONFIG_PPC_BOOK3S_64)
extern unsigned int hpage_shift;
#define HPAGE_SHIFT hpage_shift
#elif defined(CONFIG_PPC_8xx)
#define HPAGE_SHIFT		19	/* 512k pages */
#elif defined(CONFIG_PPC_FSL_BOOK3E)
#define HPAGE_SHIFT		22	/* 4M pages */
#endif
#define HPAGE_SIZE		((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE		(MMU_PAGE_COUNT-1)
#endif
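
/*
 * Worked example (illustrative): with CONFIG_PPC_FSL_BOOK3E and 4K base
 * pages, HPAGE_SHIFT=22 and PAGE_SHIFT=12, so:
 *   HPAGE_SIZE         = 1UL << 22 = 4M
 *   HUGETLB_PAGE_ORDER = 22 - 12   = 10 (one huge page = 2^10 base pages)
 */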

/*
 * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
 * assign PAGE_MASK to a larger type it gets extended the way we want
 * (i.e. with 1s in the high bits)
 */
#define PAGE_MASK      (~((1 << PAGE_SHIFT) - 1))
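
/*
 * Worked example of that sign extension (assuming PAGE_SHIFT=12):
 *   (1 << 12) - 1 = 0x00000fff              (int)
 *   PAGE_MASK     = ~0xfff = 0xfffff000     (int, negative)
 * Assigning PAGE_MASK to a u64 sign-extends it to 0xfffffffffffff000,
 * which is the mask we want for 64-bit addresses too.
 */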

/*
 * KERNELBASE is the virtual address of the start of the kernel, it's often
 * the same as PAGE_OFFSET, but _might not be_.
 *
 * The kdump dump kernel is one example where KERNELBASE != PAGE_OFFSET.
 *
 * PAGE_OFFSET is the virtual address of the start of lowmem.
 *
 * PHYSICAL_START is the physical address of the start of the kernel.
 *
 * MEMORY_START is the physical address of the start of lowmem.
 *
 * KERNELBASE, PAGE_OFFSET, and PHYSICAL_START are all configurable on
 * ppc32 and based on how they are set we determine MEMORY_START.
 *
 * For the linear mapping the following equation should be true:
 * KERNELBASE - PAGE_OFFSET = PHYSICAL_START - MEMORY_START
 *
 * Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START
 *
 * There are two ways to determine a virtual address from a physical one:
 * va = pa + PAGE_OFFSET - MEMORY_START
 * va = pa + KERNELBASE - PHYSICAL_START
 *
 * If you want to know something's offset from the start of the kernel you
 * should subtract KERNELBASE.
 *
 * If you want to test if something's a kernel address, use is_kernel_addr().
 */
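
/*
 * Hypothetical kdump-style example satisfying the equation above:
 *   KERNELBASE = 0xc2000000, PAGE_OFFSET = 0xc0000000,
 *   PHYSICAL_START = 0x02000000, MEMORY_START = 0
 *   => KERNELBASE - PAGE_OFFSET = 0x02000000 = PHYSICAL_START - MEMORY_START
 */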

#define KERNELBASE      ASM_CONST(CONFIG_KERNEL_START)
#define PAGE_OFFSET	ASM_CONST(CONFIG_PAGE_OFFSET)
#define LOAD_OFFSET	ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))

#if defined(CONFIG_NONSTATIC_KERNEL)
#ifndef __ASSEMBLY__

extern phys_addr_t memstart_addr;
extern phys_addr_t kernstart_addr;

#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC32)
extern long long virt_phys_offset;
#endif

#endif /* __ASSEMBLY__ */
#define PHYSICAL_START	kernstart_addr

#else	/* !CONFIG_NONSTATIC_KERNEL */
#define PHYSICAL_START	ASM_CONST(CONFIG_PHYSICAL_START)
#endif

/* See the description below for VIRT_PHYS_OFFSET */
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#ifdef CONFIG_RELOCATABLE
#define VIRT_PHYS_OFFSET virt_phys_offset
#else
#define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
#endif
#endif

#ifdef CONFIG_PPC64
#define MEMORY_START	0UL
#elif defined(CONFIG_NONSTATIC_KERNEL)
#define MEMORY_START	memstart_addr
#else
#define MEMORY_START	(PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
#endif

#ifdef CONFIG_FLATMEM
#define ARCH_PFN_OFFSET		((unsigned long)(MEMORY_START >> PAGE_SHIFT))
#ifndef __ASSEMBLY__
extern unsigned long max_mapnr;
static inline bool pfn_valid(unsigned long pfn)
{
	unsigned long min_pfn = ARCH_PFN_OFFSET;

	return pfn >= min_pfn && pfn < max_mapnr;
}
#endif
#endif
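
/*
 * Example with hypothetical values: PAGE_SHIFT=12 and lowmem starting at
 * MEMORY_START=0x20000000 give ARCH_PFN_OFFSET=0x20000, so pfn_valid()
 * only accepts pfns in [0x20000, max_mapnr) -- frames below lowmem are
 * rejected.
 */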

#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)

#define virt_addr_valid(kaddr)	pfn_valid(virt_to_pfn(kaddr))
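
/*
 * These compose in the obvious way; for a lowmem kernel address kaddr,
 *   virt_to_page(kaddr) == pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 * and pfn_to_kaddr() is the inverse of virt_to_pfn() for such addresses.
 */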

/*
 * On Book-E parts we need __va to parse the device tree and we can't
 * determine MEMORY_START until then.  However we can determine PHYSICAL_START
 * from information at hand (program counter, TLB lookup).
 *
 * On BookE with RELOCATABLE && PPC32
 *
 *   With RELOCATABLE && PPC32, we support loading the kernel at any physical
 *   address without any restriction on the page alignment.
 *
 *   We find the runtime address of _stext and relocate ourselves based on
 *   the following calculation:
 *
 *	  virtual_base = ALIGN_DOWN(KERNELBASE,256M) +
 *				MODULO(_stext.run,256M)
 *   and create the following mapping:
 *
 *	  ALIGN_DOWN(_stext.run,256M) => ALIGN_DOWN(KERNELBASE,256M)
 *
 *   When we process relocations, we cannot depend on the
 *   existing equation for the __va()/__pa() translations:
 *
 *	   __va(x) = (x) - PHYSICAL_START + KERNELBASE
 *
 *   Where:
 *	 PHYSICAL_START = kernstart_addr = Physical address of _stext
 *	 KERNELBASE = Compiled virtual address of _stext.
 *
 *   This formula holds true iff the kernel load address is TLB page aligned.
 *
 *   In our case, we need to also account for the shift in the kernel virtual
 *   address.
 *
 *   E.g.,
 *
 *   Let the kernel be loaded at 64MB and KERNELBASE be 0xc0000000 (same as PAGE_OFFSET).
 *   In this case, we would be mapping 0 to 0xc0000000, and kernstart_addr = 64M
 *
 *   Now __va(1MB) = (0x100000) - (0x4000000) + 0xc0000000
 *                 = 0xbc100000, which is wrong.
 *
 *   Rather, it should be: 0xc0000000 + 0x100000 = 0xc0100000
 *		according to our mapping.
 *
 *   Hence we use the following formula to get the translations right:
 *
 *	  __va(x) = (x) - [ PHYSICAL_START - Effective KERNELBASE ]
 *
 *	  Where:
 *		PHYSICAL_START = dynamic load address (the kernstart_addr variable)
 *		Effective KERNELBASE = virtual_base
 *				     = ALIGN_DOWN(KERNELBASE,256M) +
 *						MODULO(PHYSICAL_START,256M)
 *
 *	To make __va() / __pa() more lightweight, we introduce a new
 *	variable virt_phys_offset, which will hold:
 *
 *	virt_phys_offset = Effective KERNELBASE - PHYSICAL_START
 *			 = ALIGN_DOWN(KERNELBASE,256M) -
 *				ALIGN_DOWN(PHYSICAL_START,256M)
 *
 *	Hence:
 *
 *	__va(x) = x - PHYSICAL_START + Effective KERNELBASE
 *		= x + virt_phys_offset
 *
 *		and
 *	__pa(x) = x + PHYSICAL_START - Effective KERNELBASE
 *		= x - virt_phys_offset
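 *
 *	Illustrative check against the example above (kernel loaded at 64M,
 *	KERNELBASE = 0xc0000000):
 *	  virt_phys_offset = ALIGN_DOWN(0xc0000000,256M) - ALIGN_DOWN(0x4000000,256M)
 *	                   = 0xc0000000 - 0 = 0xc0000000
 *	  __va(0x100000)   = 0x100000 + 0xc0000000 = 0xc0100000, as expected.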
 *
 * On non-Book-E PPC64 PAGE_OFFSET and MEMORY_START are constants so use
 * the other definitions for __va & __pa.
 */
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
#define __pa(x) ((phys_addr_t)(unsigned long)(x) - VIRT_PHYS_OFFSET)
#else
#ifdef CONFIG_PPC64
/*
 * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
 * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
 * This also results in better code generation.
 */
#define __va(x)								\
({									\
	VIRTUAL_BUG_ON((unsigned long)(x) >= PAGE_OFFSET);		\
	(void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET);	\
})

#define __pa(x)								\
({									\
	VIRTUAL_BUG_ON((unsigned long)(x) < PAGE_OFFSET);		\
	(unsigned long)(x) & 0x0fffffffffffffffUL;			\
})
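
/*
 * Worked example (assuming the usual 64-bit PAGE_OFFSET of
 * 0xc000000000000000):
 *   __pa(0xc000000000001000) = 0xc000000000001000 & 0x0fffffffffffffff
 *                            = 0x1000
 *   __va(0x1000)             = 0x1000 | 0xc000000000000000
 *                            = 0xc000000000001000
 * OR/AND work here because the linear map base only sets the top nibble
 * and physical addresses stay below bit 60.
 */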

#else /* 32-bit, non book E */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
#endif
#endif

/*
 * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
 * and needs to be executable.  This means the whole heap ends
 * up being executable.
 */
#define VM_DATA_DEFAULT_FLAGS32	VM_DATA_FLAGS_TSK_EXEC
#define VM_DATA_DEFAULT_FLAGS64	VM_DATA_FLAGS_NON_EXEC

#ifdef __powerpc64__
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif

/*
 * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
 * "kernelness", use is_kernel_addr() - it should do what you want.
 */
#ifdef CONFIG_PPC_BOOK3E_64
#define is_kernel_addr(x)	((x) >= 0x8000000000000000ul)
#elif defined(CONFIG_PPC_BOOK3S_64)
#define is_kernel_addr(x)	((x) >= PAGE_OFFSET)
#else
#define is_kernel_addr(x)	((x) >= TASK_SIZE)
#endif

#ifndef CONFIG_PPC_BOOK3S_64
/*
 * Use the top bit of the higher-level page table entries to indicate whether
 * the entries we point to contain hugepages.  This works because we know that
 * the page tables live in kernel space.  If we ever decide to support having
 * page tables at arbitrary addresses, this breaks and will have to change.
 */
#ifdef CONFIG_PPC64
#define PD_HUGE 0x8000000000000000UL
#else
#define PD_HUGE 0x80000000
#endif

#else	/* CONFIG_PPC_BOOK3S_64 */
/*
 * Book3S 64 stores real addresses in the hugepd entries to
 * avoid overlaps with _PAGE_PRESENT and _PAGE_PTE.
 */
#define HUGEPD_ADDR_MASK	(0x0ffffffffffffffful & ~HUGEPD_SHIFT_MASK)
#endif /* CONFIG_PPC_BOOK3S_64 */

/*
 * Some number of bits at the level of the page table that points to
 * a hugepte are used to encode the size.  This masks those bits.
 * On 8xx, HW assistance requires 4k alignment for the hugepte.
 */
#ifdef CONFIG_PPC_8xx
#define HUGEPD_SHIFT_MASK     0xfff
#else
#define HUGEPD_SHIFT_MASK     0x3f
#endif

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/pgtable-be-types.h>
#else
#include <asm/pgtable-types.h>
#endif

#ifndef CONFIG_HUGETLB_PAGE
#define is_hugepd(pdep)		(0)
#define pgd_huge(pgd)		(0)
#endif /* CONFIG_HUGETLB_PAGE */

struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
		struct page *p);
extern int devmem_is_allowed(unsigned long pfn);

#ifdef CONFIG_PPC_SMLPAR
void arch_free_page(struct page *page, int order);
#define HAVE_ARCH_FREE_PAGE
#endif

struct vm_area_struct;

extern unsigned long kernstart_virt_addr;

static inline unsigned long kaslr_offset(void)
{
	return kernstart_virt_addr - KERNELBASE;
}
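
/*
 * Illustrative behaviour: without KASLR the boot code leaves
 * kernstart_virt_addr equal to KERNELBASE, so kaslr_offset() returns 0;
 * with KASLR the kernel runs at KERNELBASE + offset and records that
 * address in kernstart_virt_addr, so kaslr_offset() returns the random
 * offset.
 */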

#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */
#include <asm/slice.h>

#endif /* _ASM_POWERPC_PAGE_H */