Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) #ifndef _ASM_PGALLOC_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3) #define _ASM_PGALLOC_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5) #include <linux/gfp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) #include <linux/threads.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <asm/processor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <asm/fixmap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <asm/cache.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) 
/* This architecture supplies its own pmd_alloc_one(), pmd_free() and
 * pgd_free() below; these markers tell <asm-generic/pgalloc.h> not to
 * emit its generic fallback versions for them. */
#define __HAVE_ARCH_PMD_ALLOC_ONE
#define __HAVE_ARCH_PMD_FREE
#define __HAVE_ARCH_PGD_FREE
#include <asm-generic/pgalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) 
/* Allocate the top level pgd (page directory)
 *
 * Here (for 64 bit kernels) we implement a Hybrid L2/L3 scheme: we
 * allocate the first pmd adjacent to the pgd.  This means that we can
 * subtract a constant offset to get to it.  The pmd and pgd sizes are
 * arranged so that a single pmd covers 4GB (giving a full 64-bit
 * process access to 8TB) so our lookups are effectively L2 for the
 * first 4GB of the kernel (i.e. for all ILP32 processes and all the
 * kernel for machines with under 4GB of memory) */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	/* One allocation of 2^PGD_ALLOC_ORDER pages holds the pgd and, on
	 * 3-level configurations, the permanently attached first pmd. */
	pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
					       PGD_ALLOC_ORDER);
	pgd_t *actual_pgd = pgd;	/* the value handed back to the caller */

	if (likely(pgd != NULL)) {
		/* Zero the whole allocation (pgd plus attached-pmd area). */
		memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
#if CONFIG_PGTABLE_LEVELS == 3
		/* The pgd the caller sees starts PTRS_PER_PGD entries into
		 * the allocation; the leading part is the attached pmd, so
		 * pgd_free() can find it by subtracting the same offset. */
		actual_pgd += PTRS_PER_PGD;
		/* Populate first pmd with allocated memory.  We mark it
		 * with PxD_FLAG_ATTACHED as a signal to the system that this
		 * pmd entry may not be cleared. */
		set_pgd(actual_pgd, __pgd((PxD_FLAG_PRESENT |
				        PxD_FLAG_VALID |
					PxD_FLAG_ATTACHED)
			+ (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT)));
		/* The first pmd entry also is marked with PxD_FLAG_ATTACHED as
		 * a signal that this pmd may not be freed */
		set_pgd(pgd, __pgd(PxD_FLAG_ATTACHED));
#endif
	}
	/* NOTE(review): this runs even on allocation failure (actual_pgd ==
	 * NULL); presumably pgd_spinlock() maps the pointer value to a lock
	 * without dereferencing it — confirm against its definition. */
	spin_lock_init(pgd_spinlock(actual_pgd));
	return actual_pgd;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) #if CONFIG_PGTABLE_LEVELS == 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 	pgd -= PTRS_PER_PGD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 	free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) #if CONFIG_PGTABLE_LEVELS == 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) /* Three Level Page Table Support for pmd's */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 	set_pud(pud, __pud((PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 			(__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 	return (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_ORDER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 		 * This is the permanent pmd attached to the pgd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 		 * cannot free it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 		 * Increment the counter to compensate for the decrement
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 		 * done by generic mm code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 		mm_inc_nr_pmds(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 	free_pages((unsigned long)pmd, PMD_ORDER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) #if CONFIG_PGTABLE_LEVELS == 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 	/* preserve the gateway marker if this is the beginning of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 	 * the permanent pmd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 	if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 		set_pmd(pmd, __pmd((PxD_FLAG_PRESENT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 				PxD_FLAG_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 				PxD_FLAG_ATTACHED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 		set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | PxD_FLAG_VALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 
/* Populate a pmd from a pte's struct page: take its kernel virtual
 * address and reuse the kernel populate path above. */
#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
/* The page-table page backing a pmd entry. */
#define pmd_pgtable(pmd) pmd_page(pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) #endif