Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 tree for the Orange Pi 5/5B/5 Plus boards

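The file below is arch/arm64/mm/pageattr.c from that tree; in the upstream blame view, every line was last touched by commit 8f3ce5b39 (kx, 2023-10-28 12:00:06 +0300).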
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>

struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);

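/*
 * Note (editor's annotation): rodata_full defaults to
 * CONFIG_RODATA_FULL_DEFAULT_ENABLED and can be overridden with the
 * rodata= kernel command-line parameter (rodata=full enables it). When it
 * is set, the linear-map alias of vmalloc'ed memory is kept in sync with
 * permission changes made through the set_memory_*() helpers below.
 */
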
static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = READ_ONCE(*ptep);

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte(ptep, pte);
	return 0;
}

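/*
 * Worked example (hypothetical values): with set_mask = __pgprot(PTE_RDONLY)
 * and clear_mask = __pgprot(PTE_WRITE), a pte whose value contains
 * (PTE_VALID | PTE_WRITE) leaves this callback containing
 * (PTE_VALID | PTE_RDONLY): clear_pte_bit() masks out the bits in
 * clear_mask, set_pte_bit() ORs in the bits in set_mask, and set_pte()
 * stores the result back for this single PAGE_SIZE entry.
 */
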
/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	struct page_change_data data;
	int ret;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
					&data);

	flush_tlb_kernel_range(start, start + size);
	return ret;
}

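/*
 * Usage sketch (hypothetical call): marking four already page-mapped kernel
 * pages read-only would be
 *
 *	__change_memory_common(addr, 4 * PAGE_SIZE,
 *			       __pgprot(PTE_RDONLY), __pgprot(PTE_WRITE));
 *
 * apply_to_page_range() walks init_mm's page tables and invokes
 * change_page_range() once per pte; the unconditional TLB flush then makes
 * whatever was changed visible even if the walk failed part-way through.
 */
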
static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE * numpages;
	unsigned long end = start + size;
	struct vm_struct *area;
	int i;

	if (!PAGE_ALIGNED(addr)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	/*
	 * Kernel VA mappings are always live, and splitting live section
	 * mappings into page mappings may cause TLB conflicts. This means
	 * we have to ensure that changing the permission bits of the range
	 * we are operating on does not result in such splitting.
	 *
	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
	 * Those are guaranteed to consist entirely of page mappings, and
	 * splitting is never needed.
	 *
	 * So check whether the [addr, addr + size) interval is entirely
	 * covered by precisely one VM area that has the VM_ALLOC flag set.
	 */
	area = find_vm_area((void *)addr);
	if (!area ||
	    end > (unsigned long)area->addr + area->size ||
	    !(area->flags & VM_ALLOC))
		return -EINVAL;

	if (!numpages)
		return 0;

	/*
	 * If we are manipulating read-only permissions, apply the same
	 * change to the linear mapping of the pages that back this VM area.
	 */
	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
			    pgprot_val(clear_mask) == PTE_RDONLY)) {
		for (i = 0; i < area->nr_pages; i++) {
			__change_memory_common((u64)page_address(area->pages[i]),
					       PAGE_SIZE, set_mask, clear_mask);
		}
	}

	/*
	 * Get rid of potentially aliasing lazily unmapped vm areas that may
	 * have permissions set that deviate from the ones we are setting here.
	 */
	vm_unmap_aliases();

	return __change_memory_common(start, size, set_mask, clear_mask);
}

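/*
 * Usage sketch (hypothetical caller, not from this file):
 * change_memory_common() only accepts a range that lies inside a single
 * VM_ALLOC area, so a legitimate sequence is
 *
 *	void *buf = vmalloc(3 * PAGE_SIZE);
 *
 *	set_memory_ro((unsigned long)buf, 3);
 *	...
 *	set_memory_rw((unsigned long)buf, 3);
 *	vfree(buf);
 *
 * Passing a linear-map address (e.g. one obtained from kmalloc()) fails
 * with -EINVAL instead, because find_vm_area() finds no VM area there and
 * such memory may be covered by block mappings that would need splitting.
 */
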
int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_RDONLY),
					__pgprot(PTE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_WRITE),
					__pgprot(PTE_RDONLY));
}

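/*
 * set_memory_ro() and set_memory_rw() are exact inverses: each sets one of
 * PTE_RDONLY/PTE_WRITE and clears the other, so repeated calls are
 * idempotent. Typical callers (module or BPF code sealing data after
 * initialisation, for example) pass a vmalloc address and a whole number
 * of pages.
 */
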
int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_PXN),
					__pgprot(PTE_MAYBE_GP));
}

int set_memory_x(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_MAYBE_GP),
					__pgprot(PTE_PXN));
}

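/*
 * On arm64, "executable" means clearing PXN (privileged execute-never), so
 * set_memory_x() clears PTE_PXN and set_memory_nx() sets it. PTE_MAYBE_GP
 * evaluates to the BTI guarded-page bit when the kernel is built with
 * CONFIG_ARM64_BTI_KERNEL and to zero otherwise, keeping the two calls
 * exact inverses in either configuration.
 */
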
int set_memory_valid(unsigned long addr, int numpages, int enable)
{
	if (enable)
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(PTE_VALID),
					__pgprot(0));
	else
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(0),
					__pgprot(PTE_VALID));
}

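/*
 * Usage sketch: set_memory_valid(addr, n, 0) clears PTE_VALID on n pages so
 * that any subsequent access faults, and set_memory_valid(addr, n, 1) maps
 * them back. Unlike change_memory_common() it performs no VM_ALLOC check,
 * because its callers (e.g. __kernel_map_pages() below) operate on the
 * linear map, which they already know to be page-mapped.
 */
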
int set_direct_map_invalid_noflush(struct page *page)
{
	struct page_change_data data = {
		.set_mask = __pgprot(0),
		.clear_mask = __pgprot(PTE_VALID),
	};

	if (!rodata_full)
		return 0;

	return apply_to_page_range(&init_mm,
				   (unsigned long)page_address(page),
				   PAGE_SIZE, change_page_range, &data);
}

int set_direct_map_default_noflush(struct page *page)
{
	struct page_change_data data = {
		.set_mask = __pgprot(PTE_VALID | PTE_WRITE),
		.clear_mask = __pgprot(PTE_RDONLY),
	};

	if (!rodata_full)
		return 0;

	return apply_to_page_range(&init_mm,
				   (unsigned long)page_address(page),
				   PAGE_SIZE, change_page_range, &data);
}

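/*
 * The two helpers above change only the linear-map ("direct map") alias of
 * @page and deliberately skip the TLB flush; callers such as the vmalloc
 * VM_FLUSH_RESET_PERMS teardown path batch the flush themselves. Both are
 * no-ops unless rodata_full is set, since without it the linear map may use
 * block mappings that cannot be modified a page at a time.
 */
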
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!debug_pagealloc_enabled() && !rodata_full)
		return;

	set_memory_valid((unsigned long)page_address(page), numpages, enable);
}

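/*
 * __kernel_map_pages() backs CONFIG_DEBUG_PAGEALLOC: booting with
 * debug_pagealloc=on makes the page allocator call it with enable == 0 on
 * every free and enable == 1 on every allocation, so use-after-free bugs
 * fault immediately. It only acts when debug_pagealloc or rodata_full
 * guarantees the linear map is page-granular.
 */
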
/*
 * This function is used to determine if a linear map page has been marked as
 * not-valid. Walk the page table and check the PTE_VALID bit. This is based
 * on kern_addr_valid(), which almost does what we need.
 *
 * Because this is only called on the kernel linear map, p?d_sect() implies
 * p?d_present(). When debug_pagealloc is enabled, section mappings are
 * disabled.
 */
bool kernel_page_present(struct page *page)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep;
	unsigned long addr = (unsigned long)page_address(page);

	if (!debug_pagealloc_enabled() && !rodata_full)
		return true;

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return false;

	p4dp = p4d_offset(pgdp, addr);
	if (p4d_none(READ_ONCE(*p4dp)))
		return false;

	pudp = pud_offset(p4dp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return false;
	if (pud_sect(pud))
		return true;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return false;
	if (pmd_sect(pmd))
		return true;

	ptep = pte_offset_kernel(pmdp, addr);
	return pte_valid(READ_ONCE(*ptep));
}
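
/*
 * Usage sketch: hibernation uses kernel_page_present() while building the
 * snapshot image to decide whether a linear-map page can be read directly;
 * a page unmapped by debug_pagealloc must be mapped back first. Section
 * (block) mappings short-circuit to true because they are never
 * invalidated page-wise.
 */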