Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel page table mapping
 *
 * Copyright (C) 2015 ARM Ltd.
 */

#ifndef __ASM_KERNEL_PGTABLE_H
#define __ASM_KERNEL_PGTABLE_H

#include <asm/pgtable-hwdef.h>
#include <asm/sparsemem.h>

/*
 * The linear mapping and the start of memory are both 2M aligned (per
 * the arm64 booting.txt requirements). Hence we can use section mapping
 * with 4K (section size = 2M) but not with 16K (section size = 32M) or
 * 64K (section size = 512M).
 */
#ifdef CONFIG_ARM64_4K_PAGES
#define ARM64_SWAPPER_USES_SECTION_MAPS 1
#else
#define ARM64_SWAPPER_USES_SECTION_MAPS 0
#endif
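
/*
 * For reference, the section (PMD block) sizes above follow from the
 * granule's PMD_SHIFT: 4K -> 1 << 21 = 2M, 16K -> 1 << 25 = 32M and
 * 64K -> 1 << 29 = 512M. Only the 2M case matches the boot-time
 * alignment guarantee, hence section maps are used for 4K pages only.
 */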

/*
 * The idmap and swapper page tables need some space reserved in the kernel
 * image. Both require pgd, pud (4 levels only) and pmd tables to (section)
 * map the kernel. With the 64K page configuration, swapper and idmap need to
 * map to pte level. The swapper also maps the FDT (see __create_page_tables
 * for more information). Note that the number of ID map translation levels
 * could be increased on the fly if system RAM is out of reach for the default
 * VA range, so pages required to map highest possible PA are reserved in all
 * cases.
 */
#if ARM64_SWAPPER_USES_SECTION_MAPS
#define SWAPPER_PGTABLE_LEVELS	(CONFIG_PGTABLE_LEVELS - 1)
#define IDMAP_PGTABLE_LEVELS	(ARM64_HW_PGTABLE_LEVELS(PHYS_MASK_SHIFT) - 1)
#else
#define SWAPPER_PGTABLE_LEVELS	(CONFIG_PGTABLE_LEVELS)
#define IDMAP_PGTABLE_LEVELS	(ARM64_HW_PGTABLE_LEVELS(PHYS_MASK_SHIFT))
#endif
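
/*
 * Worked example (hypothetical configuration: 4K pages with 48-bit VA,
 * so CONFIG_PGTABLE_LEVELS = 4): section maps are used, hence
 * SWAPPER_PGTABLE_LEVELS = 3 (pgd, pud and pmd tables, with the pmd
 * entries being 2M blocks). With PHYS_MASK_SHIFT = 48,
 * IDMAP_PGTABLE_LEVELS is likewise ARM64_HW_PGTABLE_LEVELS(48) - 1 = 3.
 */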


/*
 * If KASLR is enabled, then an offset K is added to the kernel address
 * space. The bottom 21 bits of this offset are zero to guarantee 2MB
 * alignment for PA and VA.
 *
 * For each pagetable level of the swapper, we know that the shift will
 * be larger than 21 (for the 4KB granule case we use section maps thus
 * the smallest shift is actually 30) thus there is the possibility that
 * KASLR can increase the number of pagetable entries by 1, so we make
 * room for this extra entry.
 *
 * Note KASLR cannot increase the number of required entries for a level
 * by more than one because it increments both the virtual start and end
 * addresses equally (the extra entry comes from the case where the end
 * address is just pushed over a boundary and the start address isn't).
 */

#ifdef CONFIG_RANDOMIZE_BASE
#define EARLY_KASLR	(1)
#else
#define EARLY_KASLR	(0)
#endif

#define EARLY_ENTRIES(vstart, vend, shift) \
	((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + EARLY_KASLR)
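
/*
 * Worked example (hypothetical figures, 4K pages): with
 * shift = PUD_SHIFT = 30, a 16M image whose link address does not cross
 * a 1G boundary gives ((vend - 1) >> 30) - (vstart >> 30) + 1 = 1 entry;
 * EARLY_KASLR then reserves one further entry on CONFIG_RANDOMIZE_BASE
 * kernels, in case the runtime offset pushes the end, but not the start,
 * across an additional boundary.
 */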

#define EARLY_PGDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT))

#if SWAPPER_PGTABLE_LEVELS > 3
#define EARLY_PUDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PUD_SHIFT))
#else
#define EARLY_PUDS(vstart, vend) (0)
#endif

#if SWAPPER_PGTABLE_LEVELS > 2
#define EARLY_PMDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, SWAPPER_TABLE_SHIFT))
#else
#define EARLY_PMDS(vstart, vend) (0)
#endif

#define EARLY_PAGES(vstart, vend) ( 1 			/* PGDIR page */				\
			+ EARLY_PGDS((vstart), (vend)) 	/* each PGDIR needs a next level page table */	\
			+ EARLY_PUDS((vstart), (vend))	/* each PUD needs a next level page table */	\
			+ EARLY_PMDS((vstart), (vend)))	/* each PMD needs a next level page table */
#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end))
#define IDMAP_DIR_SIZE		(IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
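
/*
 * Worked example (hypothetical configuration: 4K pages, 48-bit VA/PA,
 * CONFIG_RANDOMIZE_BASE disabled, image smaller than 1G and not
 * straddling a 1G boundary): EARLY_PGDS = 1, EARLY_PUDS = 0 (only three
 * swapper levels) and EARLY_PMDS = 1, so EARLY_PAGES = 3 and
 * INIT_DIR_SIZE = 12K. IDMAP_PGTABLE_LEVELS = 3 likewise gives
 * IDMAP_DIR_SIZE = 12K.
 */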

/* Initial memory map size */
#if ARM64_SWAPPER_USES_SECTION_MAPS
#define SWAPPER_BLOCK_SHIFT	SECTION_SHIFT
#define SWAPPER_BLOCK_SIZE	SECTION_SIZE
#define SWAPPER_TABLE_SHIFT	PUD_SHIFT
#else
#define SWAPPER_BLOCK_SHIFT	PAGE_SHIFT
#define SWAPPER_BLOCK_SIZE	PAGE_SIZE
#define SWAPPER_TABLE_SHIFT	PMD_SHIFT
#endif

/* The size of the initial kernel direct mapping */
#define SWAPPER_INIT_MAP_SIZE	(_AC(1, UL) << SWAPPER_TABLE_SHIFT)
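
/*
 * For reference (assuming the default shifts for each granule),
 * SWAPPER_INIT_MAP_SIZE works out to 1G with 4K pages (PUD_SHIFT = 30),
 * 32M with 16K pages (PMD_SHIFT = 25) and 512M with 64K pages
 * (PMD_SHIFT = 29).
 */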

/*
 * Initial memory map attributes.
 */
#define SWAPPER_PTE_FLAGS	(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define SWAPPER_PMD_FLAGS	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)

#if ARM64_SWAPPER_USES_SECTION_MAPS
#define SWAPPER_MM_MMUFLAGS	(PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
#else
#define SWAPPER_MM_MMUFLAGS	(PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
#endif
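
/*
 * In short, the flags above make the early mappings inner-shareable
 * Normal memory (ATTRINDX selects MT_NORMAL in MAIR_EL1) with the
 * access flag already set, so the boot-time mappings never take
 * access-flag faults.
 */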

/*
 * To make optimal use of block mappings when laying out the linear
 * mapping, round down the base of physical memory to a size that can
 * be mapped efficiently, i.e., either PUD_SIZE (4k granule) or PMD_SIZE
 * (64k granule), or a multiple that can be mapped using contiguous bits
 * in the page tables: 32 * PMD_SIZE (16k granule)
 */
#if defined(CONFIG_ARM64_4K_PAGES)
#define ARM64_MEMSTART_SHIFT		PUD_SHIFT
#elif defined(CONFIG_ARM64_16K_PAGES)
#define ARM64_MEMSTART_SHIFT		(PMD_SHIFT + 5)
#else
#define ARM64_MEMSTART_SHIFT		PMD_SHIFT
#endif
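
/*
 * The resulting rounding granularity (assuming the default shifts) is
 * 1G for 4K pages (PUD_SHIFT = 30), 1G for 16K pages
 * (PMD_SHIFT + 5 = 25 + 5 = 30, i.e. 32 * PMD_SIZE) and 512M for 64K
 * pages (PMD_SHIFT = 29).
 */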

/*
 * sparsemem vmemmap imposes an additional requirement on the alignment of
 * memstart_addr, due to the fact that the base of the vmemmap region
 * has a direct correspondence with the base of physical memory, and needs
 * to appear sufficiently aligned in the virtual address space.
 */
#if defined(CONFIG_SPARSEMEM_VMEMMAP) && ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS
#define ARM64_MEMSTART_ALIGN	(1UL << SECTION_SIZE_BITS)
#else
#define ARM64_MEMSTART_ALIGN	(1UL << ARM64_MEMSTART_SHIFT)
#endif
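
/*
 * For example, if SECTION_SIZE_BITS is 30 and the 64K granule is in use
 * (ARM64_MEMSTART_SHIFT = 29), memstart_addr is aligned to 1G rather
 * than 512M so that the corresponding vmemmap base stays section
 * aligned.
 */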

#endif	/* __ASM_KERNEL_PGTABLE_H */