Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  *  linux/mm/memory.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  * demand-loading started 01.12.91 - seems it is high on the list of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  * things wanted, and it should be easy to implement. - Linus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  * pages started 02.12.91, seems to work. - Linus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17)  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18)  * would have taken more than the 6M I have free, but it worked well as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19)  * far as I could see.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21)  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25)  * Real VM (paging to/from disk) started 18.12.91. Much more work and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26)  * thought has to go into this. Oh, well..
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27)  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28)  *		Found it. Everything seems to work now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29)  * 20.12.91  -  Ok, making the swap-device changeable like the root.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33)  * 05.04.94  -  Multi-page memory management added for v1.1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34)  *              Idea by Alex Bligh (alex@cconcepts.co.uk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36)  * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37)  *		(Gerhard.Wichert@pdb.siemens.de)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39)  * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) #include <linux/kernel_stat.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) #include <linux/sched/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) #include <linux/sched/coredump.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) #include <linux/sched/numa_balancing.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) #include <linux/sched/task.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) #include <linux/hugetlb.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) #include <linux/mman.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) #include <linux/swap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) #include <linux/highmem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) #include <linux/pagemap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) #include <linux/memremap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) #include <linux/ksm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) #include <linux/rmap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) #include <linux/delayacct.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) #include <linux/pfn_t.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) #include <linux/writeback.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) #include <linux/memcontrol.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) #include <linux/mmu_notifier.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) #include <linux/swapops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) #include <linux/elf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) #include <linux/gfp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) #include <linux/migrate.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) #include <linux/debugfs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) #include <linux/userfaultfd_k.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) #include <linux/dax.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) #include <linux/oom.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) #include <linux/numa.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) #include <linux/perf_event.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) #include <linux/ptrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) #include <trace/hooks/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) #include <trace/events/kmem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) #include <asm/mmu_context.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) #include <asm/pgalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) #include <asm/tlb.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) #include <asm/tlbflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) #include "pgalloc-track.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) #include "internal.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) #include <trace/hooks/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) #define CREATE_TRACE_POINTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) #include <trace/events/pagefault.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) #if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) #ifndef CONFIG_NEED_MULTIPLE_NODES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) /* use the per-pgdat data instead for discontigmem - mbligh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) unsigned long max_mapnr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) EXPORT_SYMBOL(max_mapnr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) struct page *mem_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) EXPORT_SYMBOL(mem_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108)  * A number of key systems in x86 including ioremap() rely on the assumption
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109)  * that high_memory defines the upper bound on direct map memory, the end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110)  * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111)  * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112)  * and ZONE_HIGHMEM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) void *high_memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) EXPORT_SYMBOL(high_memory);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118)  * Randomize the address space (stacks, mmaps, brk, etc.).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120)  * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121)  *   as ancient (libc5 based) binaries can segfault. )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122)  */
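/*
 * Values of randomize_va_space: 0 disables address-space randomization
 * entirely (the "norandmaps" boot parameter below sets this), 1 randomizes
 * stack/mmap/vdso placement, and 2 additionally randomizes the brk base.
 */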
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) int randomize_va_space __read_mostly =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) #ifdef CONFIG_COMPAT_BRK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 					1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) 					2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) #ifndef arch_faults_on_old_pte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) static inline bool arch_faults_on_old_pte(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 	 * Those arches which don't have the hw access flag feature need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 	 * implement their own helper. By default, "true" means a page fault
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 	 * will be hit on an old pte.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) #ifndef arch_wants_old_prefaulted_pte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) static inline bool arch_wants_old_prefaulted_pte(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 	 * Transitioning a PTE from 'old' to 'young' can be expensive on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 	 * some architectures, even if it's performed in hardware. By
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 	 * default, "false" means prefaulted entries will be 'young'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) static int __init disable_randmaps(char *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 	randomize_va_space = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) __setup("norandmaps", disable_randmaps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) unsigned long zero_pfn __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) EXPORT_SYMBOL(zero_pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) unsigned long highest_memmap_pfn __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167)  * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) static int __init init_zero_pfn(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 	zero_pfn = page_to_pfn(ZERO_PAGE(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) early_initcall(init_zero_pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177)  * Only trace rss_stat when there is a 512kb cross over.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178)  * Smaller changes may be lost unless every small change is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179)  * crossing into or returning to a 512kb boundary.
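 * (With 4 KiB pages, the 128-page TRACE_MM_COUNTER_THRESHOLD below equals
 * 512 KiB; the threshold must be a power of two for the mask test in
 * mm_trace_rss_stat() to catch boundary crossings.)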
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) #define TRACE_MM_COUNTER_THRESHOLD 128
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) void mm_trace_rss_stat(struct mm_struct *mm, int member, long count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 		       long value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 	long thresh_mask = ~(TRACE_MM_COUNTER_THRESHOLD - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 	/* Threshold roll-over, trace it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 	if ((count & thresh_mask) != ((count - value) & thresh_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 		trace_rss_stat(mm, member, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) EXPORT_SYMBOL_GPL(mm_trace_rss_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) #if defined(SPLIT_RSS_COUNTING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) void sync_mm_rss(struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 	for (i = 0; i < NR_MM_COUNTERS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 		if (current->rss_stat.count[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 			add_mm_counter(mm, i, current->rss_stat.count[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 			current->rss_stat.count[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 	current->rss_stat.events = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 	struct task_struct *task = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 	if (likely(task->mm == mm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 		task->rss_stat.count[member] += val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 		add_mm_counter(mm, member, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) #define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) #define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) /* sync counter once per 64 page faults */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) #define TASK_RSS_EVENTS_THRESH	(64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) static void check_sync_rss_stat(struct task_struct *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 	if (unlikely(task != current))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 	if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 		sync_mm_rss(task->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) #else /* SPLIT_RSS_COUNTING */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) #define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) #define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) static void check_sync_rss_stat(struct task_struct *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) #endif /* SPLIT_RSS_COUNTING */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242)  * Note: this doesn't free the actual pages themselves. That
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243)  * has been handled earlier when unmapping all the memory regions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 			   unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 	pgtable_t token = pmd_pgtable(*pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 	pmd_clear(pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 	pte_free_tlb(tlb, token, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 	mm_dec_nr_ptes(tlb->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 				unsigned long addr, unsigned long end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 				unsigned long floor, unsigned long ceiling)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 	pmd_t *pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 	unsigned long next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 	unsigned long start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 	start = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 	pmd = pmd_offset(pud, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 		next = pmd_addr_end(addr, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 		if (pmd_none_or_clear_bad(pmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 		free_pte_range(tlb, pmd, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 	} while (pmd++, addr = next, addr != end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) 	start &= PUD_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 	if (start < floor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 	if (ceiling) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 		ceiling &= PUD_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 		if (!ceiling)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 	if (end - 1 > ceiling - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 	pmd = pmd_offset(pud, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 	pud_clear(pud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 	pmd_free_tlb(tlb, pmd, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 	mm_dec_nr_pmds(tlb->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 				unsigned long addr, unsigned long end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 				unsigned long floor, unsigned long ceiling)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 	pud_t *pud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 	unsigned long next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 	unsigned long start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 	start = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 	pud = pud_offset(p4d, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 		next = pud_addr_end(addr, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 		if (pud_none_or_clear_bad(pud))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 	} while (pud++, addr = next, addr != end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 	start &= P4D_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 	if (start < floor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) 	if (ceiling) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 		ceiling &= P4D_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) 		if (!ceiling)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) 	if (end - 1 > ceiling - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 	pud = pud_offset(p4d, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 	p4d_clear(p4d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 	pud_free_tlb(tlb, pud, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 	mm_dec_nr_puds(tlb->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) 				unsigned long addr, unsigned long end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 				unsigned long floor, unsigned long ceiling)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 	p4d_t *p4d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 	unsigned long next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 	unsigned long start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) 	start = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 	p4d = p4d_offset(pgd, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 		next = p4d_addr_end(addr, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 		if (p4d_none_or_clear_bad(p4d))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 		free_pud_range(tlb, p4d, addr, next, floor, ceiling);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 	} while (p4d++, addr = next, addr != end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 	start &= PGDIR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 	if (start < floor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 	if (ceiling) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) 		ceiling &= PGDIR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 		if (!ceiling)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 	if (end - 1 > ceiling - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 	p4d = p4d_offset(pgd, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 	pgd_clear(pgd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 	p4d_free_tlb(tlb, p4d, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356)  * This function frees user-level page tables of a process.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) void free_pgd_range(struct mmu_gather *tlb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) 			unsigned long addr, unsigned long end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) 			unsigned long floor, unsigned long ceiling)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) 	pgd_t *pgd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) 	unsigned long next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 	 * The next few lines have given us lots of grief...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 	 * Why are we testing PMD* at this top level?  Because often
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 	 * there will be no work to do at all, and we'd prefer not to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 	 * go all the way down to the bottom just to discover that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 	 * Why all these "- 1"s?  Because 0 represents both the bottom
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 	 * of the address space and the top of it (using -1 for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 	 * top wouldn't help much: the masks would do the wrong thing).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 	 * The rule is that addr 0 and floor 0 refer to the bottom of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376)  * the address space, but end 0 and ceiling 0 refer to the top.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 	 * that end 0 case should be mythical).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 	 * Wherever addr is brought up or ceiling brought down, we must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 	 * be careful to reject "the opposite 0" before it confuses the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 	 * subsequent tests.  But what about where end is brought down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 	 * by PMD_SIZE below? no, end can't go down to 0 there.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 	 * Whereas we round start (addr) and ceiling down, by different
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 	 * masks at different levels, in order to test whether a table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 	 * now has no other vmas using it, so can be freed, we don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 	 * bother to round floor or end up - the tests don't need that.
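	 *
	 * Concrete example: a ceiling of 0 means "top of address space", so
	 * "ceiling - 1" wraps to ULONG_MAX and the "end - 1 > ceiling - 1"
	 * test below never trims end - which is exactly the behavior we want.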
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 	addr &= PMD_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 	if (addr < floor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 		addr += PMD_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 		if (!addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 	if (ceiling) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 		ceiling &= PMD_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 		if (!ceiling)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 	if (end - 1 > ceiling - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 		end -= PMD_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 	if (addr > end - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 	 * We add page table cache pages with PAGE_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 	 * (see pte_free_tlb()), so flush the tlb if we need to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 	tlb_change_page_size(tlb, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 	pgd = pgd_offset(tlb->mm, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 		next = pgd_addr_end(addr, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 		if (pgd_none_or_clear_bad(pgd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 	} while (pgd++, addr = next, addr != end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 		unsigned long floor, unsigned long ceiling)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 	while (vma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 		struct vm_area_struct *next = vma->vm_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 		unsigned long addr = vma->vm_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 		 * Hide vma from rmap and truncate_pagecache before freeing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 		 * pgtables
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 		vm_write_begin(vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 		unlink_anon_vmas(vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 		vm_write_end(vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 		unlink_file_vma(vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 		if (is_vm_hugetlb_page(vma)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 				floor, next ? next->vm_start : ceiling);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 			 * Optimization: gather nearby vmas into one call down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 			       && !is_vm_hugetlb_page(next)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 				vma = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 				next = vma->vm_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 				vm_write_begin(vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 				unlink_anon_vmas(vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 				vm_write_end(vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 				unlink_file_vma(vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 			free_pgd_range(tlb, addr, vma->vm_end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 				floor, next ? next->vm_start : ceiling);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 		vma = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	spinlock_t *ptl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 	pgtable_t new = pte_alloc_one(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	if (!new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	 * Ensure all pte setup (eg. pte page lock and page clearing) are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	 * visible before the pte is made visible to other CPUs by being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	 * put into page tables.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	 * The other side of the story is the pointer chasing in the page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	 * table walking code (when walking the page table without locking;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	 * ie. most of the time). Fortunately, these data accesses consist
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	 * of a chain of data-dependent loads, meaning most CPUs (alpha
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	 * being the notable exception) will already guarantee loads are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	 * seen in-order. See the alpha page table accessors for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	 * smp_rmb() barriers in page table walking code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	ptl = pmd_lock(mm, pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 		mm_inc_nr_ptes(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 		pmd_populate(mm, pmd, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 		new = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	spin_unlock(ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	if (new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 		pte_free(mm, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) int __pte_alloc_kernel(pmd_t *pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	pte_t *new = pte_alloc_one_kernel(&init_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	if (!new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	smp_wmb(); /* See comment in __pte_alloc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	spin_lock(&init_mm.page_table_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 		pmd_populate_kernel(&init_mm, pmd, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 		new = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	spin_unlock(&init_mm.page_table_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	if (new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 		pte_free_kernel(&init_mm, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) static inline void init_rss_vec(int *rss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	if (current->mm == mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 		sync_mm_rss(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	for (i = 0; i < NR_MM_COUNTERS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 		if (rss[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 			add_mm_counter(mm, i, rss[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529)  * This function is called to print an error when a bad pte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530)  * is found. For example, we might have a PFN-mapped pte in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531)  * a region that doesn't allow it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533)  * The calling function must still handle the error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 			  pte_t pte, struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	p4d_t *p4d = p4d_offset(pgd, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	pud_t *pud = pud_offset(p4d, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	pmd_t *pmd = pmd_offset(pud, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 	struct address_space *mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 	pgoff_t index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 	static unsigned long resume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 	static unsigned long nr_shown;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	static unsigned long nr_unshown;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	 * Allow a burst of 60 reports, then keep quiet for that minute;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	 * or allow a steady drip of one report per second.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	if (nr_shown == 60) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 		if (time_before(jiffies, resume)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 			nr_unshown++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 		if (nr_unshown) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 				 nr_unshown);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 			nr_unshown = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 		nr_shown = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	if (nr_shown++ == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 		resume = jiffies + 60 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	index = linear_page_index(vma, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 		 current->comm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 		 (long long)pte_val(pte), (long long)pmd_val(*pmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	if (page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 		dump_page(page, "bad pte");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 		 (void *)addr, READ_ONCE(vma->vm_flags), vma->anon_vma, mapping, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	pr_alert("file:%pD fault:%ps mmap:%ps readpage:%ps\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 		 vma->vm_file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 		 vma->vm_ops ? vma->vm_ops->fault : NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 		 mapping ? mapping->a_ops->readpage : NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	dump_stack();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587)  * _vm_normal_page -- This function gets the "struct page" associated with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588)  * a pte.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590)  * "Special" mappings do not wish to be associated with a "struct page" (either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591)  * it doesn't exist, or it exists but they don't want to touch it). In this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592)  * case, NULL is returned here. "Normal" mappings do have a struct page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594)  * There are 2 broad cases. Firstly, an architecture may define a pte_special()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595)  * pte bit, in which case this function is trivial. Secondly, an architecture
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596)  * may not have a spare pte bit, which requires a more complicated scheme,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597)  * described below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599)  * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600)  * special mapping (even if there are underlying and valid "struct pages").
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601)  * COWed pages of a VM_PFNMAP are always normal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603)  * The way we recognize COWed pages within VM_PFNMAP mappings is through the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604)  * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605)  * set, and the vm_pgoff will point to the first PFN mapped: thus every special
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606)  * mapping will always honor the rule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608)  *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610)  * And for normal mappings this is false.
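 * (A COWed page within such a mapping is a freshly allocated anonymous page,
 *  so its pfn no longer satisfies the identity above - that mismatch is what
 *  marks it as a normal page.)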
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612)  * This restricts such mappings to be a linear translation from virtual address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613)  * to pfn. To get around this restriction, we allow arbitrary mappings so long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614)  * as the vma is not a COW mapping; in that case, we know that all ptes are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615)  * special (because none can have been COWed).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618)  * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620)  * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621)  * page" backing, however the difference is that _all_ pages with a struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622)  * page (that is, those where pfn_valid is true) are refcounted and considered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623)  * normal pages by the VM. The disadvantage is that pages are refcounted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624)  * (which can be slower and simply not an option for some PFNMAP users). The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625)  * advantage is that we don't have to follow the strict linearity rule of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626)  * PFNMAP mappings in order to support COWable mappings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 			      pte_t pte, unsigned long vma_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	unsigned long pfn = pte_pfn(pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 		if (likely(!pte_special(pte)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 			goto check_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 		if (vma->vm_ops && vma->vm_ops->find_special_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 			return vma->vm_ops->find_special_page(vma, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 		if (vma_flags & (VM_PFNMAP | VM_MIXEDMAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 		if (is_zero_pfn(pfn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 		if (pte_devmap(pte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 		print_bad_pte(vma, addr, pte, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	/* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	 * This part should never get called when CONFIG_SPECULATIVE_PAGE_FAULT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	 * is set. This is mainly because we can't rely on vm_start.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	if (unlikely(vma_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 		if (vma_flags & VM_MIXEDMAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 			if (!pfn_valid(pfn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 				return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 			unsigned long off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 			off = (addr - vma->vm_start) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 			if (pfn == vma->vm_pgoff + off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 				return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 			if (!is_cow_mapping(vma_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 				return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	if (is_zero_pfn(pfn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) check_pfn:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	if (unlikely(pfn > highest_memmap_pfn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 		print_bad_pte(vma, addr, pte, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 	 * NOTE! We still have PageReserved() pages in the page tables.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	 * eg. VDSO mappings can cause them to exist.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	return pfn_to_page(pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 				pmd_t pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	unsigned long pfn = pmd_pfn(pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	 * There is no pmd_special() but there may be special pmds, e.g.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	 * in a direct-access (dax) mapping, so let's just replicate the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	 * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 		if (vma->vm_flags & VM_MIXEDMAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 			if (!pfn_valid(pfn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 				return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 			unsigned long off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 			off = (addr - vma->vm_start) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 			if (pfn == vma->vm_pgoff + off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 				return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 			if (!is_cow_mapping(vma->vm_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 				return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	if (pmd_devmap(pmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	if (is_huge_zero_pmd(pmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	if (unlikely(pfn > highest_memmap_pfn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	 * NOTE! We still have PageReserved() pages in the page tables.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	 * eg. VDSO mappings can cause them to exist.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	return pfn_to_page(pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731)  * copy one vm_area from one task to the other. Assumes the page tables
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732)  * already present in the new task to be cleared in the whole range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733)  * covered by this vma.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) static unsigned long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 		struct vm_area_struct *src_vma, unsigned long addr, int *rss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	unsigned long vm_flags = dst_vma->vm_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	pte_t pte = *src_pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	swp_entry_t entry = pte_to_swp_entry(pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	if (likely(!non_swap_entry(entry))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 		if (swap_duplicate(entry) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 			return entry.val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 		/* make sure dst_mm is on swapoff's mmlist. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 		if (unlikely(list_empty(&dst_mm->mmlist))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 			spin_lock(&mmlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 			if (list_empty(&dst_mm->mmlist))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 				list_add(&dst_mm->mmlist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 						&src_mm->mmlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 			spin_unlock(&mmlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 		rss[MM_SWAPENTS]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	} else if (is_migration_entry(entry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 		page = migration_entry_to_page(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 		rss[mm_counter(page)]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 		if (is_write_migration_entry(entry) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 				is_cow_mapping(vm_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 			 * COW mappings require pages in both the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 			 * parent and the child to be read-only.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 			make_migration_entry_read(&entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 			pte = swp_entry_to_pte(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 			if (pte_swp_soft_dirty(*src_pte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 				pte = pte_swp_mksoft_dirty(pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 			if (pte_swp_uffd_wp(*src_pte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 				pte = pte_swp_mkuffd_wp(pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 			set_pte_at(src_mm, addr, src_pte, pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	} else if (is_device_private_entry(entry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		page = device_private_entry_to_page(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		 * Update rss count even for unaddressable pages, as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		 * they should be treated just like normal pages in this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		 * respect.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 		 * We will likely want to have some new rss counters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		 * for unaddressable pages, at some point. But for now
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		 * keep things as they are.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		get_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		rss[mm_counter(page)]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		page_dup_rmap(page, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		 * We do not preserve soft-dirty information, because so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		 * far, checkpoint/restore is the only feature that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		 * requires it. And checkpoint/restore does not work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		 * when a device driver is involved (you cannot easily
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		 * save and restore device driver state).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		if (is_write_device_private_entry(entry) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		    is_cow_mapping(vm_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 			make_device_private_entry_read(&entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 			pte = swp_entry_to_pte(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 			if (pte_swp_uffd_wp(*src_pte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 				pte = pte_swp_mkuffd_wp(pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 			set_pte_at(src_mm, addr, src_pte, pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	}
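	/*
	 * Do not carry the uffd-wp marker into the child if the destination
	 * vma has no userfaultfd write-protection registered.
	 */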
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	if (!userfaultfd_wp(dst_vma))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 		pte = pte_swp_clear_uffd_wp(pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	set_pte_at(dst_mm, addr, dst_pte, pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817)  * Copy a present and normal page if necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819)  * NOTE! The usual case is that this doesn't need to do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820)  * anything, and can just return a positive value. That
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821)  * will let the caller know that it can just increase
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822)  * the page refcount and re-use the pte the traditional
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823)  * way.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825)  * But _if_ we need to copy it because it needs to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826)  * pinned in the parent (and the child should get its own
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827)  * copy rather than just a reference to the same page),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828)  * we'll do that here and return zero to let the caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829)  * know we're done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831)  * And if we need a pre-allocated page but don't yet have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832)  * one, return a negative error to let the preallocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833)  * code know so that it can do so outside the page table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834)  * lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		  struct page **prealloc, pte_t pte, struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	struct mm_struct *src_mm = src_vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	struct page *new_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	if (!is_cow_mapping(src_vma->vm_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	 * What we want to do is to check whether this page may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	 * have been pinned by the parent process.  If so,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	 * instead of write-protecting the pte on both sides, we copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	 * the page immediately so that we'll always guarantee
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	 * the pinned page won't be randomly replaced in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	 * future.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	 * The page pinning checks are just "has this mm ever
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	 * seen pinning", along with the (inexact) check of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	 * the page count. That might give false positives for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	 * pinning, but it will work correctly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	if (likely(!atomic_read(&src_mm->has_pinned)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	if (likely(!page_maybe_dma_pinned(page)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	new_page = *prealloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	if (!new_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	 * We have a prealloc page, all good!  Take it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	 * over and copy the page & arm it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	*prealloc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	copy_user_highpage(new_page, page, addr, src_vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	__SetPageUptodate(new_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	page_add_new_anon_rmap(new_page, dst_vma, addr, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	rss[mm_counter(new_page)]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	/* All done, just insert the new page copy in the child */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	pte = mk_pte(new_page, dst_vma->vm_page_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma->vm_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	if (userfaultfd_pte_wp(dst_vma, *src_pte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		/* Uffd-wp needs to be delivered to dest pte as well */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		pte = pte_wrprotect(pte_mkuffd_wp(pte));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891)  * Copy one pte.  Returns 0 on success, or -EAGAIN if a preallocated page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892)  * is required to copy this pte.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		 struct page **prealloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	struct mm_struct *src_mm = src_vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	unsigned long vm_flags = src_vma->vm_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	pte_t pte = *src_pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	page = vm_normal_page(src_vma, addr, pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	if (page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		retval = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 					   addr, rss, prealloc, pte, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		if (retval <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 			return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
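		/*
		 * retval > 0: the page stays shared.  Take a reference and
		 * duplicate its rmap for the child's mapping.
		 */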
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		get_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		page_dup_rmap(page, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		rss[mm_counter(page)]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	 * If it's a COW mapping, write protect it both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	 * in the parent and the child
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	if (is_cow_mapping(vm_flags) && pte_write(pte)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		ptep_set_wrprotect(src_mm, addr, src_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		pte = pte_wrprotect(pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	 * If it's a shared mapping, mark it clean in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	 * the child
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	if (vm_flags & VM_SHARED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		pte = pte_mkclean(pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	pte = pte_mkold(pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	if (!userfaultfd_wp(dst_vma))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		pte = pte_clear_uffd_wp(pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 
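/*
 * Preallocate a destination page for copy_present_page(): allocate it near
 * @addr in @vma and charge it to @src_mm's memcg.  Returns NULL on failure,
 * in which case the caller reports -ENOMEM.
 */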
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) static inline struct page *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) page_copy_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		   unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	struct page *new_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	if (!new_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	if (mem_cgroup_charge(new_page, src_mm, GFP_KERNEL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		put_page(new_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	cgroup_throttle_swaprate(new_page, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	return new_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	       pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	       unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	struct mm_struct *dst_mm = dst_vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	struct mm_struct *src_mm = src_vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	pte_t *orig_src_pte, *orig_dst_pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	pte_t *src_pte, *dst_pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	spinlock_t *src_ptl, *dst_ptl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	int progress, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	int rss[NR_MM_COUNTERS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	swp_entry_t entry = (swp_entry_t){0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	struct page *prealloc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	progress = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	init_rss_vec(rss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	if (!dst_pte) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	src_pte = pte_offset_map(src_pmd, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	src_ptl = pte_lockptr(src_mm, src_pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	orig_src_pte = src_pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	orig_dst_pte = dst_pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	arch_enter_lazy_mmu_mode();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		 * We are holding two locks at this point - either of them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		 * could generate latencies in another task on another CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		if (progress >= 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 			progress = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 			if (need_resched() ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		if (pte_none(*src_pte)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 			progress++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		if (unlikely(!pte_present(*src_pte))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 			entry.val = copy_nonpresent_pte(dst_mm, src_mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 							dst_pte, src_pte,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 							dst_vma, src_vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 							addr, rss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 			if (entry.val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 			progress += 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		/* copy_present_pte() will clear `*prealloc' if consumed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 				       addr, rss, &prealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		 * If we need a pre-allocated page for this pte, drop the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		 * locks, allocate, and try again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		if (unlikely(ret == -EAGAIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		if (unlikely(prealloc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 			 * The preallocated page cannot be reused for the next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 			 * pte, so that mempolicy is strictly followed (e.g.,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 			 * alloc_page_vma() allocates the page based on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 			 * address).  This can only happen if a pinned pte changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 			put_page(prealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 			prealloc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		progress += 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	arch_leave_lazy_mmu_mode();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	spin_unlock(src_ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	pte_unmap(orig_src_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	add_mm_rss_vec(dst_mm, rss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	pte_unmap_unlock(orig_dst_pte, dst_ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 
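	/*
	 * A non-zero entry.val means swap_duplicate() needs a swap count
	 * continuation page.  Allocate it now that the page table locks are
	 * dropped, then resume copying from where we stopped.
	 */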
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	if (entry.val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 			ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		entry.val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	} else if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		WARN_ON_ONCE(ret != -EAGAIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		prealloc = page_copy_prealloc(src_mm, src_vma, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		if (!prealloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		/* We've captured and resolved the error. Reset, try again. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	if (addr != end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	if (unlikely(prealloc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		put_page(prealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	       pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	       unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	struct mm_struct *dst_mm = dst_vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	struct mm_struct *src_mm = src_vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	pmd_t *src_pmd, *dst_pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	unsigned long next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	if (!dst_pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	src_pmd = pmd_offset(src_pud, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		next = pmd_addr_end(addr, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 			|| pmd_devmap(*src_pmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 			int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 			err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 					    addr, dst_vma, src_vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 			if (err == -ENOMEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 				return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 			if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 			/* fall through */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		if (pmd_none_or_clear_bad(src_pmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 				   addr, next))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	       p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	       unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	struct mm_struct *dst_mm = dst_vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	struct mm_struct *src_mm = src_vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	pud_t *src_pud, *dst_pud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	unsigned long next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	if (!dst_pud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	src_pud = pud_offset(src_p4d, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		next = pud_addr_end(addr, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 			int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 			err = copy_huge_pud(dst_mm, src_mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 					    dst_pud, src_pud, addr, src_vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 			if (err == -ENOMEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 				return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 			if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 			/* fall through */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 		if (pud_none_or_clear_bad(src_pud))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 		if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 				   addr, next))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	} while (dst_pud++, src_pud++, addr = next, addr != end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	       pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	       unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	struct mm_struct *dst_mm = dst_vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	p4d_t *src_p4d, *dst_p4d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	unsigned long next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	if (!dst_p4d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	src_p4d = p4d_offset(src_pgd, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		next = p4d_addr_end(addr, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		if (p4d_none_or_clear_bad(src_p4d))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 				   addr, next))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	pgd_t *src_pgd, *dst_pgd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	unsigned long next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	unsigned long addr = src_vma->vm_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	unsigned long end = src_vma->vm_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	struct mm_struct *dst_mm = dst_vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	struct mm_struct *src_mm = src_vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	struct mmu_notifier_range range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	bool is_cow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	 * Don't copy ptes where a page fault will fill them correctly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	 * Fork becomes much lighter when there are big shared or private
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	 * readonly mappings. The tradeoff is that copy_page_range is more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	 * efficient than faulting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	if (!(src_vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	    !src_vma->anon_vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	if (is_vm_hugetlb_page(src_vma))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		return copy_hugetlb_page_range(dst_mm, src_mm, src_vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		 * We do not free on error cases below, as remove_vma()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 		 * gets called on error from a higher-level routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		ret = track_pfn_copy(src_vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	 * We need to invalidate the secondary MMU mappings only when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	 * there could be a permission downgrade on the ptes of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	 * parent mm. And a permission downgrade will only happen if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	 * is_cow_mapping() returns true.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	is_cow = is_cow_mapping(src_vma->vm_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	if (is_cow) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 					0, src_vma, src_mm, addr, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 		mmu_notifier_invalidate_range_start(&range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 		 * Disabling preemption is not needed for the write side, as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 		 * the read side doesn't spin, but goes to the mmap_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		 * Use the raw variant of the seqcount_t write API to avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		 * lockdep complaining about preemptibility.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		mmap_assert_write_locked(src_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		raw_write_seqcount_begin(&src_mm->write_protect_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	dst_pgd = pgd_offset(dst_mm, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	src_pgd = pgd_offset(src_mm, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		next = pgd_addr_end(addr, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		if (pgd_none_or_clear_bad(src_pgd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 		if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 					    addr, next))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 			ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	if (is_cow) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		raw_write_seqcount_end(&src_mm->write_protect_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		mmu_notifier_invalidate_range_end(&range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) static unsigned long zap_pte_range(struct mmu_gather *tlb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 				struct vm_area_struct *vma, pmd_t *pmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 				unsigned long addr, unsigned long end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 				struct zap_details *details)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	struct mm_struct *mm = tlb->mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	int force_flush = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	int rss[NR_MM_COUNTERS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	spinlock_t *ptl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	pte_t *start_pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	pte_t *pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	swp_entry_t entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	tlb_change_page_size(tlb, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	init_rss_vec(rss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	pte = start_pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	flush_tlb_batched_pending(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	arch_enter_lazy_mmu_mode();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 		pte_t ptent = *pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		if (pte_none(ptent))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
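		/* Drop out; the locks are released and the range restarted below. */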
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		if (need_resched())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		if (pte_present(ptent)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 			struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 			page = vm_normal_page(vma, addr, ptent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 			if (unlikely(details) && page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 				 * unmap_shared_mapping_pages() wants to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 				 * invalidate cache without truncating:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 				 * unmap shared but keep private pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 				if (details->check_mapping &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 				    details->check_mapping != page_rmapping(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 					continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 			ptent = ptep_get_and_clear_full(mm, addr, pte,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 							tlb->fullmm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 			tlb_remove_tlb_entry(tlb, pte, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 			if (unlikely(!page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 			if (!PageAnon(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 				if (pte_dirty(ptent)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 					force_flush = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 					set_page_dirty(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 				if (pte_young(ptent) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 				    likely(!(vma->vm_flags & VM_SEQ_READ)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 					mark_page_accessed(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 			rss[mm_counter(page)]--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 			page_remove_rmap(page, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 			if (unlikely(page_mapcount(page) < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 				print_bad_pte(vma, addr, ptent, page);
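			/*
			 * The gather batch is full (or LRU pagevec caching is
			 * disabled): force a TLB flush and restart from the
			 * next pte.
			 */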
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 			if (unlikely(__tlb_remove_page(tlb, page)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 				     lru_cache_disabled()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 				force_flush = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 				addr += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 		entry = pte_to_swp_entry(ptent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		if (is_device_private_entry(entry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 			struct page *page = device_private_entry_to_page(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 			if (unlikely(details && details->check_mapping)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 				 * unmap_shared_mapping_pages() wants to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 				 * invalidate cache without truncating:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 				 * unmap shared but keep private pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 				if (details->check_mapping !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 				    page_rmapping(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 					continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 			rss[mm_counter(page)]--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 			page_remove_rmap(page, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 			put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		/* If details->check_mapping, we leave swap entries. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		if (unlikely(details))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 		if (!non_swap_entry(entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 			rss[MM_SWAPENTS]--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		else if (is_migration_entry(entry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 			struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 			page = migration_entry_to_page(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 			rss[mm_counter(page)]--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		if (unlikely(!free_swap_and_cache(entry)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 			print_bad_pte(vma, addr, ptent, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	} while (pte++, addr += PAGE_SIZE, addr != end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	add_mm_rss_vec(mm, rss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	arch_leave_lazy_mmu_mode();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	/* Do the actual TLB flush before dropping ptl */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	if (force_flush)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 		tlb_flush_mmu_tlbonly(tlb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	pte_unmap_unlock(start_pte, ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	 * If we forced a TLB flush (either due to running out of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	 * batch buffers or because we needed to flush dirty TLB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	 * entries before releasing the ptl), free the batched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	 * memory too. Restart if we didn't do everything.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	if (force_flush) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 		force_flush = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 		tlb_flush_mmu(tlb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	if (addr != end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 		goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	return addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 				struct vm_area_struct *vma, pud_t *pud,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 				unsigned long addr, unsigned long end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 				struct zap_details *details)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	pmd_t *pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	unsigned long next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	pmd = pmd_offset(pud, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 		next = pmd_addr_end(addr, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 			if (next - addr != HPAGE_PMD_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 				__split_huge_pmd(vma, pmd, addr, false, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 			else if (zap_huge_pmd(tlb, vma, pmd, addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 				goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 			/* fall through */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 		} else if (details && details->single_page &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 			   PageTransCompound(details->single_page) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 			   next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 			spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 			 * Take and drop THP pmd lock so that we cannot return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 			 * prematurely, while zap_huge_pmd() has cleared *pmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 			 * but not yet decremented compound_mapcount().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 			spin_unlock(ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 		 * Here there can be other concurrent MADV_DONTNEED or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 		 * trans huge page faults running, and if the pmd is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 		 * none or trans huge it can change under us. This is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		 * because MADV_DONTNEED holds the mmap_lock in read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 		 * mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 			goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 		next = zap_pte_range(tlb, vma, pmd, addr, next, details);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	} while (pmd++, addr = next, addr != end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	return addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 				struct vm_area_struct *vma, p4d_t *p4d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 				unsigned long addr, unsigned long end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 				struct zap_details *details)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	pud_t *pud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	unsigned long next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	pud = pud_offset(p4d, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 		next = pud_addr_end(addr, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 		if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 			if (next - addr != HPAGE_PUD_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 				mmap_assert_locked(tlb->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 				split_huge_pud(vma, pud, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 			} else if (zap_huge_pud(tlb, vma, pud, addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 				goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 			/* fall through */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 		if (pud_none_or_clear_bad(pud))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 		next = zap_pmd_range(tlb, vma, pud, addr, next, details);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	} while (pud++, addr = next, addr != end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	return addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 				struct vm_area_struct *vma, pgd_t *pgd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 				unsigned long addr, unsigned long end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 				struct zap_details *details)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	p4d_t *p4d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	unsigned long next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	p4d = p4d_offset(pgd, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		next = p4d_addr_end(addr, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 		if (p4d_none_or_clear_bad(p4d))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 		next = zap_pud_range(tlb, vma, p4d, addr, next, details);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	} while (p4d++, addr = next, addr != end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	return addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) void unmap_page_range(struct mmu_gather *tlb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 			     struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 			     unsigned long addr, unsigned long end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 			     struct zap_details *details)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	pgd_t *pgd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	unsigned long next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	BUG_ON(addr >= end);
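	/*
	 * vm_write_begin()/vm_write_end() are not upstream; in this tree they
	 * appear to mark the vma as being modified so speculative page fault
	 * handlers back off and retry under the mmap_lock.
	 */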
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	vm_write_begin(vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	tlb_start_vma(tlb, vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	pgd = pgd_offset(vma->vm_mm, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 		next = pgd_addr_end(addr, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		if (pgd_none_or_clear_bad(pgd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 		next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	} while (pgd++, addr = next, addr != end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	tlb_end_vma(tlb, vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	vm_write_end(vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) static void unmap_single_vma(struct mmu_gather *tlb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		struct vm_area_struct *vma, unsigned long start_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 		unsigned long end_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 		struct zap_details *details)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	unsigned long start = max(vma->vm_start, start_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	unsigned long end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	if (start >= vma->vm_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	end = min(vma->vm_end, end_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	if (end <= vma->vm_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	if (vma->vm_file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 		uprobe_munmap(vma, start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 
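	/*
	 * For PFN mappings, drop the pfn-range tracking for the whole vma
	 * (passing 0, 0 untracks the entire range).
	 */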
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	if (unlikely(vma->vm_flags & VM_PFNMAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 		untrack_pfn(vma, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	if (start != end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 		if (unlikely(is_vm_hugetlb_page(vma))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 			 * It is undesirable to test vma->vm_file as it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 			 * should be non-NULL for a valid hugetlb area.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 			 * However, vm_file will be NULL in the error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 			 * cleanup path of mmap_region. When
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 			 * hugetlbfs ->mmap method fails,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 			 * mmap_region() nullifies vma->vm_file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 			 * before calling this function to clean up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 			 * Since no pte has actually been setup, it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 			 * safe to do nothing in this case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 			if (vma->vm_file) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 				i_mmap_lock_write(vma->vm_file->f_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 				__unmap_hugepage_range_final(tlb, vma, start, end, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 				i_mmap_unlock_write(vma->vm_file->f_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 			unmap_page_range(tlb, vma, start, end, details);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)  * unmap_vmas - unmap a range of memory covered by a list of vma's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)  * @tlb: address of the caller's struct mmu_gather
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)  * @vma: the starting vma
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)  * @start_addr: virtual address at which to start unmapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)  * @end_addr: virtual address at which to end unmapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)  * Unmap all pages in the vma list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)  * Only addresses between @start_addr and @end_addr will be unmapped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)  * The VMA list must be sorted in ascending virtual address order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)  * unmap_vmas() assumes that the caller will flush the whole unmapped address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)  * range after unmap_vmas() returns.  So the only responsibility here is to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)  * drops the lock and schedules.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) void unmap_vmas(struct mmu_gather *tlb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 		struct vm_area_struct *vma, unsigned long start_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 		unsigned long end_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	struct mmu_notifier_range range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 				start_addr, end_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	mmu_notifier_invalidate_range_start(&range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 		unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	mmu_notifier_invalidate_range_end(&range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 
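/*
 * A minimal sketch of the calling pattern the kernel-doc above describes:
 * the caller owns the mmu_gather and performs the final flush of the whole
 * range via tlb_finish_mmu().  The helper name and its arguments are
 * hypothetical; only the ordering of the calls is the point.
 */
static void teardown_range_sketch(struct mm_struct *mm,
		struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start, end);
	update_hiwater_rss(mm);
	unmap_vmas(&tlb, vma, start, end);	/* walks vma->vm_next internally */
	tlb_finish_mmu(&tlb, start, end);	/* flushes everything unmapped above */
}
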
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)  * zap_page_range - remove user pages in a given range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)  * @vma: vm_area_struct holding the applicable pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)  * @start: starting address of pages to zap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)  * @size: number of bytes to zap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)  * Caller must protect the VMA list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) void zap_page_range(struct vm_area_struct *vma, unsigned long start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 		unsigned long size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	struct mmu_notifier_range range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	struct mmu_gather tlb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	lru_add_drain();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 				start, start + size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	tlb_gather_mmu(&tlb, vma->vm_mm, start, range.end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	update_hiwater_rss(vma->vm_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	mmu_notifier_invalidate_range_start(&range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 		unmap_single_vma(&tlb, vma, start, range.end, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	mmu_notifier_invalidate_range_end(&range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	tlb_finish_mmu(&tlb, start, range.end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 
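/*
 * A minimal usage sketch for zap_page_range(), assuming the caller has
 * looked up the mm and holds mmap_lock so the VMA list is protected as
 * required above.  The helper name and the way the range is obtained are
 * hypothetical.
 */
static void drop_user_range_sketch(struct mm_struct *mm,
		unsigned long start, unsigned long len)
{
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	vma = find_vma(mm, start);
	if (vma && start >= vma->vm_start)
		zap_page_range(vma, start, len);
	mmap_read_unlock(mm);
}
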
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)  * zap_page_range_single - remove user pages in a given range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)  * @vma: vm_area_struct holding the applicable pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)  * @address: starting address of pages to zap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)  * @size: number of bytes to zap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)  * @details: details of shared cache invalidation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)  * The range must fit into one VMA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 		unsigned long size, struct zap_details *details)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	struct mmu_notifier_range range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	struct mmu_gather tlb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	lru_add_drain();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 				address, address + size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	tlb_gather_mmu(&tlb, vma->vm_mm, address, range.end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	update_hiwater_rss(vma->vm_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	mmu_notifier_invalidate_range_start(&range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	unmap_single_vma(&tlb, vma, address, range.end, details);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	mmu_notifier_invalidate_range_end(&range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	tlb_finish_mmu(&tlb, address, range.end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)  * zap_vma_ptes - remove ptes mapping the vma
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)  * @vma: vm_area_struct holding ptes to be zapped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)  * @address: starting address of pages to zap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)  * @size: number of bytes to zap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)  * This function only unmaps ptes assigned to VM_PFNMAP vmas.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)  * The entire address range must be fully contained within the vma.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 		unsigned long size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	if (address < vma->vm_start || address + size > vma->vm_end ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	    !(vma->vm_flags & VM_PFNMAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	zap_page_range_single(vma, address, size, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) EXPORT_SYMBOL_GPL(zap_vma_ptes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 
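/*
 * A minimal usage sketch for zap_vma_ptes(): a driver that earlier mapped
 * device memory into a VM_PFNMAP vma tears the PTEs down again, e.g.
 * before the backing resource disappears.  The helper name is hypothetical;
 * the range must lie entirely inside the vma, here simply the whole vma.
 */
static void my_pfnmap_teardown_sketch(struct vm_area_struct *vma)
{
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}
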
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	pgd_t *pgd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	p4d_t *p4d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	pud_t *pud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	pmd_t *pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	pgd = pgd_offset(mm, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	p4d = p4d_alloc(mm, pgd, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	if (!p4d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	pud = pud_alloc(mm, p4d, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	if (!pud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	pmd = pmd_alloc(mm, pud, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	if (!pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 	VM_BUG_ON(pmd_trans_huge(*pmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	return pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 			spinlock_t **ptl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	pmd_t *pmd = walk_to_pmd(mm, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	if (!pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	return pte_alloc_map_lock(mm, pmd, addr, ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) static int validate_page_before_insert(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	if (PageAnon(page) || PageSlab(page) || page_has_type(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	flush_dcache_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) static int insert_page_into_pte_locked(struct mm_struct *mm, pte_t *pte,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 			unsigned long addr, struct page *page, pgprot_t prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	if (!pte_none(*pte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	/* Ok, finally just insert the thing.. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	get_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	inc_mm_counter_fast(mm, mm_counter_file(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	page_add_file_rmap(page, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	set_pte_at(mm, addr, pte, mk_pte(page, prot));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)  * This is the old fallback for page remapping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)  * For historical reasons, it only allows reserved pages. Only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)  * old drivers should use this, and they needed to mark their
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706)  * pages reserved for the old functions anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) static int insert_page(struct vm_area_struct *vma, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 			struct page *page, pgprot_t prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	struct mm_struct *mm = vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	pte_t *pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	spinlock_t *ptl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	retval = validate_page_before_insert(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	retval = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	pte = get_locked_pte(mm, addr, &ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	if (!pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	retval = insert_page_into_pte_locked(mm, pte, addr, page, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	pte_unmap_unlock(pte, ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) #ifdef pte_index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) static int insert_page_in_batch_locked(struct mm_struct *mm, pte_t *pte,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 			unsigned long addr, struct page *page, pgprot_t prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	if (!page_count(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	err = validate_page_before_insert(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	return insert_page_into_pte_locked(mm, pte, addr, page, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) /* insert_pages() amortizes the cost of spinlock operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)  * when inserting pages in a loop. Arch *must* define pte_index.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 			struct page **pages, unsigned long *num, pgprot_t prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	pmd_t *pmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	pte_t *start_pte, *pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	spinlock_t *pte_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	struct mm_struct *const mm = vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	unsigned long curr_page_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	unsigned long remaining_pages_total = *num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	unsigned long pages_to_write_in_pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) more:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 	ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	pmd = walk_to_pmd(mm, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	if (!pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	pages_to_write_in_pmd = min_t(unsigned long,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 		remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	/* Allocate the PTE if necessary; takes PMD lock once only. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	if (pte_alloc(mm, pmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 	while (pages_to_write_in_pmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 		int pte_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 		const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 		start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 		for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 			int err = insert_page_in_batch_locked(mm, pte,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 				addr, pages[curr_page_idx], prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 			if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 				pte_unmap_unlock(start_pte, pte_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 				ret = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 				remaining_pages_total -= pte_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 			addr += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 			++curr_page_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 		pte_unmap_unlock(start_pte, pte_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 		pages_to_write_in_pmd -= batch_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 		remaining_pages_total -= batch_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	if (remaining_pages_total)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 		goto more;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	*num = remaining_pages_total;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) #endif  /* ifdef pte_index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802)  * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)  * @vma: user vma to map to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)  * @addr: target start user address of these pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)  * @pages: source kernel pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)  * @num: in: number of pages to map. out: number of pages that were *not*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)  * mapped. (0 means all pages were successfully mapped).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)  * Preferred over vm_insert_page() when inserting multiple pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)  * In case of error, we may have mapped a subset of the provided
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)  * pages. It is the caller's responsibility to account for this case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)  * The same restrictions apply as in vm_insert_page().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 			struct page **pages, unsigned long *num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) #ifdef pte_index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	if (addr < vma->vm_start || end_addr >= vma->vm_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	if (!(vma->vm_flags & VM_MIXEDMAP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 		BUG_ON(mmap_read_trylock(vma->vm_mm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		BUG_ON(vma->vm_flags & VM_PFNMAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 		vma->vm_flags |= VM_MIXEDMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	/* Defer page refcount checking till we're about to map that page. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	unsigned long idx = 0, pgcount = *num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	int err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	for (; idx < pgcount; ++idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 		err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	*num = pgcount - idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) #endif  /* ifdef pte_index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) EXPORT_SYMBOL(vm_insert_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 
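/*
 * A minimal sketch of vm_insert_pages() from an ->mmap handler, following
 * the kernel-doc above: @num is updated to the count of pages that were
 * *not* mapped, so a non-zero value after the call indicates a partial
 * mapping.  "struct my_buf_sketch" and the helper name are hypothetical.
 */
struct my_buf_sketch {
	struct page **pages;
	unsigned long npages;
};

static int my_buf_mmap_sketch(struct my_buf_sketch *buf,
			      struct vm_area_struct *vma)
{
	unsigned long num = min(buf->npages, vma_pages(vma));
	int err;

	err = vm_insert_pages(vma, vma->vm_start, buf->pages, &num);
	if (err)
		pr_warn("sketch: %lu page(s) left unmapped (err %d)\n",
			num, err);
	return err;
}
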
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847)  * vm_insert_page - insert single page into user vma
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)  * @vma: user vma to map to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)  * @addr: target user address of this page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)  * @page: source kernel page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)  * This allows drivers to insert individual pages they've allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)  * into a user vma.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)  * The page has to be a nice clean _individual_ kernel allocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)  * If you allocate a compound page, you need to have marked it as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)  * such (__GFP_COMP), or manually just split the page up yourself
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858)  * (see split_page()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)  * NOTE! Traditionally this was done with "remap_pfn_range()" which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861)  * took an arbitrary page protection parameter. This doesn't allow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862)  * that. Your vma protection will have to be set up correctly, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863)  * means that if you want a shared writable mapping, you'd better
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)  * ask for a shared writable mapping!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)  * The page does not need to be reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)  * Usually this function is called from f_op->mmap() handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)  * under mm->mmap_lock write-lock, so it can change vma->vm_flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)  * Caller must set VM_MIXEDMAP on vma if it wants to call this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)  * function from other places, for example from page-fault handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)  * Return: %0 on success, negative error code otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 			struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	if (addr < vma->vm_start || addr >= vma->vm_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	if (!page_count(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 	if (!(vma->vm_flags & VM_MIXEDMAP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 		BUG_ON(mmap_read_trylock(vma->vm_mm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 		BUG_ON(vma->vm_flags & VM_PFNMAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 		vma->vm_flags |= VM_MIXEDMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	return insert_page(vma, addr, page, vma->vm_page_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) EXPORT_SYMBOL(vm_insert_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 
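/*
 * A minimal sketch of vm_insert_page() from an f_op->mmap handler, as the
 * comment above suggests: a single, refcounted kernel page is mapped at
 * the start of the vma.  "shared_page" and the helper name are hypothetical.
 */
static int my_dev_mmap_one_page_sketch(struct page *shared_page,
				       struct vm_area_struct *vma)
{
	if (vma->vm_end - vma->vm_start < PAGE_SIZE)
		return -EINVAL;
	/* vm_insert_page() sets VM_MIXEDMAP on the vma if not already set */
	return vm_insert_page(vma, vma->vm_start, shared_page);
}
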
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)  * __vm_map_pages - map a range of kernel pages into a user vma
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)  * @vma: user vma to map to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)  * @pages: pointer to array of source kernel pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)  * @num: number of pages in page array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)  * @offset: user's requested vm_pgoff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)  * This allows drivers to map a range of kernel pages into a user vma.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)  * Return: 0 on success and error code otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 				unsigned long num, unsigned long offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	unsigned long count = vma_pages(vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	unsigned long uaddr = vma->vm_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	/* Fail if the user requested offset is beyond the end of the object */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 	if (offset >= num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	/* Fail if the user requested size exceeds available object size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	if (count > num - offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	for (i = 0; i < count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 		ret = vm_insert_page(vma, uaddr, pages[offset + i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 		uaddr += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)  * vm_map_pages - map a range of kernel pages starting at a non-zero offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929)  * @vma: user vma to map to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930)  * @pages: pointer to array of source kernel pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)  * @num: number of pages in page array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)  * Maps an object consisting of @num pages, catering for the user's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934)  * requested vm_pgoff.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)  * If we fail to insert any page into the vma, the function will return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)  * immediately leaving any previously inserted pages present.  Callers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)  * from the mmap handler may immediately return the error as their caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939)  * will destroy the vma, removing any successfully inserted pages. Other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940)  * callers should make their own arrangements for calling unmap_region().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)  * Context: Process context. Called by mmap handlers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)  * Return: 0 on success and error code otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 				unsigned long num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) EXPORT_SYMBOL(vm_map_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 
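/*
 * A minimal sketch of vm_map_pages() from an ->mmap handler: the driver
 * hands over its whole page array and lets the helper apply the user's
 * vm_pgoff and reject over-long requests with -ENXIO.  "pages"/"npages"
 * describe a hypothetical buffer object.
 */
static int my_buf_mmap_range_sketch(struct page **pages, unsigned long npages,
				    struct vm_area_struct *vma)
{
	return vm_map_pages(vma, pages, npages);
}
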
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)  * vm_map_pages_zero - map a range of kernel pages starting at offset 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)  * @vma: user vma to map to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955)  * @pages: pointer to array of source kernel pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)  * @num: number of pages in page array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)  * Similar to vm_map_pages(), except that it explicitly sets the offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)  * to 0. This function is intended for drivers that do not take
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)  * vm_pgoff into account.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)  * Context: Process context. Called by mmap handlers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963)  * Return: 0 on success and error code otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 				unsigned long num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	return __vm_map_pages(vma, pages, num, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) EXPORT_SYMBOL(vm_map_pages_zero);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 
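/*
 * As above, but a sketch for a driver that interprets vm_pgoff itself
 * (e.g. as a buffer selector) and therefore always maps from page 0.
 * "pages"/"npages" again describe a hypothetical buffer object.
 */
static int my_buf_mmap_zero_sketch(struct page **pages, unsigned long npages,
				   struct vm_area_struct *vma)
{
	return vm_map_pages_zero(vma, pages, npages);
}
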
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 			pfn_t pfn, pgprot_t prot, bool mkwrite)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 	struct mm_struct *mm = vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 	pte_t *pte, entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	spinlock_t *ptl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	pte = get_locked_pte(mm, addr, &ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	if (!pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 		return VM_FAULT_OOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	if (!pte_none(*pte)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 		if (mkwrite) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 			 * For read faults on private mappings the PFN passed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 			 * in may not match the PFN we have mapped if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 			 * mapped PFN is a writeable COW page.  In the mkwrite
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 			 * case we are creating a writable PTE for a shared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 			 * mapping and we expect the PFNs to match. If they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 			 * don't match, we are likely racing with block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 			 * allocation and mapping invalidation so just skip the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 			 * update.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 			if (pte_pfn(*pte) != pfn_t_to_pfn(pfn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 				WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 				goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 			entry = pte_mkyoung(*pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 			entry = maybe_mkwrite(pte_mkdirty(entry),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 							vma->vm_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 			if (ptep_set_access_flags(vma, addr, pte, entry, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 				update_mmu_cache(vma, addr, pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 	/* Ok, finally just insert the thing.. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	if (pfn_t_devmap(pfn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 		entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 		entry = pte_mkspecial(pfn_t_pte(pfn, prot));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 	if (mkwrite) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 		entry = pte_mkyoung(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 		entry = maybe_mkwrite(pte_mkdirty(entry), vma->vm_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	set_pte_at(mm, addr, pte, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 	pte_unmap_unlock(pte, ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 	return VM_FAULT_NOPAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)  * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028)  * @vma: user vma to map to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029)  * @addr: target user address of this page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)  * @pfn: source kernel pfn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031)  * @pgprot: pgprot flags for the inserted page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)  * This is exactly like vmf_insert_pfn(), except that it allows drivers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)  * to override pgprot on a per-page basis.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)  * This only makes sense for IO mappings, and it makes no sense for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)  * COW mappings.  In general, using multiple vmas is preferable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)  * vmf_insert_pfn_prot should only be used if using multiple VMAs is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)  * impractical.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041)  * See vmf_insert_mixed_prot() for a discussion of the implication of using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)  * a value of @pgprot different from that of @vma->vm_page_prot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)  * Context: Process context.  May allocate using %GFP_KERNEL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045)  * Return: vm_fault_t value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 			unsigned long pfn, pgprot_t pgprot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	 * Technically, architectures with pte_special can avoid all these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 	 * restrictions (same for remap_pfn_range).  However we would like
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	 * consistency in testing and feature parity among all, so we should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 	 * try to keep these invariants in place for everybody.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 						(VM_PFNMAP|VM_MIXEDMAP));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	if (addr < vma->vm_start || addr >= vma->vm_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 		return VM_FAULT_SIGBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	if (!pfn_modify_allowed(pfn, pgprot))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 		return VM_FAULT_SIGBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 			false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) EXPORT_SYMBOL(vmf_insert_pfn_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 
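/*
 * A minimal sketch of vmf_insert_pfn_prot() called from a vm_ops->fault
 * handler for an I/O mapping that wants write-combining instead of the
 * vma's default protection.  "bar_pfn" (the base pfn of a device BAR) and
 * the helper name are hypothetical.
 */
static vm_fault_t my_dev_io_fault_sketch(struct vm_fault *vmf,
					 unsigned long bar_pfn)
{
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t prot = pgprot_writecombine(vma->vm_page_prot);

	return vmf_insert_pfn_prot(vma, vmf->address,
				   bar_pfn + vmf->pgoff, prot);
}
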
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076)  * vmf_insert_pfn - insert single pfn into user vma
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)  * @vma: user vma to map to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)  * @addr: target user address of this page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)  * @pfn: source kernel pfn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)  * Similar to vm_insert_page, this allows drivers to insert individual pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)  * they've allocated into a user vma. Same comments apply.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)  * This function should only be called from a vm_ops->fault handler, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085)  * in that case the handler should return the result of this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)  * vma cannot be a COW mapping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089)  * As this is called only for pages that do not currently exist, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090)  * do not need to flush old virtual caches or the TLB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092)  * Context: Process context.  May allocate using %GFP_KERNEL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093)  * Return: vm_fault_t value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 			unsigned long pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) EXPORT_SYMBOL(vmf_insert_pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 
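/*
 * A minimal sketch of the intended call site for vmf_insert_pfn(): a
 * vm_ops->fault handler that returns the result directly.  The device
 * structure and the way the pfn is derived are hypothetical.
 */
struct my_dev_sketch {
	unsigned long phys_base_pfn;	/* base pfn of a contiguous region */
};

static vm_fault_t my_dev_fault_sketch(struct vm_fault *vmf)
{
	struct my_dev_sketch *dev = vmf->vma->vm_private_data;

	return vmf_insert_pfn(vmf->vma, vmf->address,
			      dev->phys_base_pfn + vmf->pgoff);
}
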
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 	/* these checks mirror the abort conditions in vm_normal_page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 	if (vma->vm_flags & VM_MIXEDMAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	if (pfn_t_devmap(pfn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 	if (pfn_t_special(pfn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	if (is_zero_pfn(pfn_t_to_pfn(pfn)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 		unsigned long addr, pfn_t pfn, pgprot_t pgprot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 		bool mkwrite)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 	BUG_ON(!vm_mixed_ok(vma, pfn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 	if (addr < vma->vm_start || addr >= vma->vm_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 		return VM_FAULT_SIGBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	track_pfn_insert(vma, &pgprot, pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 	if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 		return VM_FAULT_SIGBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 	 * If we don't have pte special, then we have to use the pfn_valid()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	 * refcount the page if pfn_valid is true (hence insert_page rather
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 	 * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 	 * without pte special, it would then be refcounted as a normal page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 	    !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 		struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 		 * At this point we are committed to insert_page()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 		 * regardless of whether the caller specified flags that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 		 * result in pfn_t_has_page() == false.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 		page = pfn_to_page(pfn_t_to_pfn(pfn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 		err = insert_page(vma, addr, page, pgprot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 		return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 	if (err == -ENOMEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 		return VM_FAULT_OOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 	if (err < 0 && err != -EBUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 		return VM_FAULT_SIGBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 	return VM_FAULT_NOPAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163)  * vmf_insert_mixed_prot - insert single pfn into user vma with specified pgprot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164)  * @vma: user vma to map to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)  * @addr: target user address of this page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)  * @pfn: source kernel pfn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)  * @pgprot: pgprot flags for the inserted page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)  * This is exactly like vmf_insert_mixed(), except that it allows drivers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170)  * to override pgprot on a per-page basis.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172)  * Typically this function should be used by drivers to set caching- and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173)  * encryption bits different from those of @vma->vm_page_prot, because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)  * the caching- or encryption mode may not be known at mmap() time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175)  * This is ok as long as @vma->vm_page_prot is not used by the core vm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176)  * to set caching and encryption bits for those vmas (except for COW pages).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)  * This is ensured by core vm only modifying these page table entries using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178)  * functions that don't touch caching- or encryption bits, using pte_modify()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179)  * if needed. (See for example mprotect()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180)  * Also when new page-table entries are created, this is only done using the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181)  * fault() callback, and never using the value of vma->vm_page_prot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182)  * except for page-table entries that point to anonymous pages as the result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)  * of COW.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185)  * Context: Process context.  May allocate using %GFP_KERNEL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186)  * Return: vm_fault_t value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 				 pfn_t pfn, pgprot_t pgprot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 	return __vm_insert_mixed(vma, addr, pfn, pgprot, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) EXPORT_SYMBOL(vmf_insert_mixed_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 		pfn_t pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) EXPORT_SYMBOL(vmf_insert_mixed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 
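/*
 * A minimal sketch for the vmf_insert_mixed() family: a ->fault handler
 * for a VM_MIXEDMAP vma where some offsets are backed by ordinary pages
 * and others by raw device pfns.  How the backing is looked up ("page"
 * vs. "dev_pfn") is hypothetical; only the two insertion calls follow the
 * interfaces above.
 */
static vm_fault_t my_obj_mixed_fault_sketch(struct vm_fault *vmf,
					    struct page *page,
					    unsigned long dev_pfn)
{
	if (page)	/* refcounted kernel page */
		return vmf_insert_mixed(vmf->vma, vmf->address,
					page_to_pfn_t(page));
	/* raw device memory, never refcounted */
	return vmf_insert_mixed(vmf->vma, vmf->address,
				__pfn_to_pfn_t(dev_pfn, PFN_DEV));
}
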
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203)  *  If insertion of the PTE failed because someone else already added a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204)  *  different entry in the meantime, we treat that as success, as we assume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205)  *  the same entry was actually inserted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 		unsigned long addr, pfn_t pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215)  * Maps a range of physical memory into the requested pages. The old
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)  * mappings are removed. Any references to nonexistent pages result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217)  * in null mappings (currently treated as "copy-on-access").
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 			unsigned long addr, unsigned long end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 			unsigned long pfn, pgprot_t prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 	pte_t *pte, *mapped_pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 	spinlock_t *ptl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 	mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 	if (!pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 	arch_enter_lazy_mmu_mode();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 		BUG_ON(!pte_none(*pte));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 		if (!pfn_modify_allowed(pfn, prot)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 			err = -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 		pfn++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 	} while (pte++, addr += PAGE_SIZE, addr != end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 	arch_leave_lazy_mmu_mode();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 	pte_unmap_unlock(mapped_pte, ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 			unsigned long addr, unsigned long end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 			unsigned long pfn, pgprot_t prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 	pmd_t *pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 	unsigned long next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 	pfn -= addr >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 	pmd = pmd_alloc(mm, pud, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 	if (!pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 	VM_BUG_ON(pmd_trans_huge(*pmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 		next = pmd_addr_end(addr, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 		err = remap_pte_range(mm, pmd, addr, next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 				pfn + (addr >> PAGE_SHIFT), prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 	} while (pmd++, addr = next, addr != end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 			unsigned long addr, unsigned long end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 			unsigned long pfn, pgprot_t prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 	pud_t *pud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 	unsigned long next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 	pfn -= addr >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 	pud = pud_alloc(mm, p4d, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 	if (!pud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 		next = pud_addr_end(addr, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 		err = remap_pmd_range(mm, pud, addr, next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 				pfn + (addr >> PAGE_SHIFT), prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	} while (pud++, addr = next, addr != end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 			unsigned long addr, unsigned long end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 			unsigned long pfn, pgprot_t prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 	p4d_t *p4d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 	unsigned long next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 	pfn -= addr >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 	p4d = p4d_alloc(mm, pgd, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 	if (!p4d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 		next = p4d_addr_end(addr, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 		err = remap_pud_range(mm, p4d, addr, next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 				pfn + (addr >> PAGE_SHIFT), prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 	} while (p4d++, addr = next, addr != end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313)  * remap_pfn_range - remap kernel memory to userspace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314)  * @vma: user vma to map to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)  * @addr: target page aligned user address to start at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)  * @pfn: page frame number of kernel physical memory address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317)  * @size: size of mapping area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318)  * @prot: page protection flags for this mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)  * Note: this is only safe if the mm semaphore is held when called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322)  * Return: %0 on success, negative error code otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 		    unsigned long pfn, unsigned long size, pgprot_t prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	pgd_t *pgd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 	unsigned long next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 	unsigned long end = addr + PAGE_ALIGN(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	struct mm_struct *mm = vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 	unsigned long remap_pfn = pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 	if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 	 * Physically remapped pages are special. Tell the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 	 * rest of the world about it:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 	 *   VM_IO tells people not to look at these pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 	 *	(accesses can have side effects).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 	 *   VM_PFNMAP tells the core MM that the base pages are just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 	 *	raw PFN mappings, and do not have a "struct page" associated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 	 *	with them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 	 *   VM_DONTEXPAND
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 	 *      Disable vma merging and expanding with mremap().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 	 *   VM_DONTDUMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 	 *      Omit vma from core dump, even when VM_IO is turned off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 	 * There's a horrible special case to handle copy-on-write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 	 * behaviour that some programs depend on. We mark the "original"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 	 * See vm_normal_page() for details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	if (is_cow_mapping(vma->vm_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 		if (addr != vma->vm_start || end != vma->vm_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 		vma->vm_pgoff = pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 	err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 	BUG_ON(addr >= end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 	pfn -= addr >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 	pgd = pgd_offset(mm, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 	flush_cache_range(vma, addr, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 		next = pgd_addr_end(addr, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 		err = remap_p4d_range(mm, pgd, addr, next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 				pfn + (addr >> PAGE_SHIFT), prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 	} while (pgd++, addr = next, addr != end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 		untrack_pfn(vma, remap_pfn, PAGE_ALIGN(size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) EXPORT_SYMBOL(remap_pfn_range);
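
/*
 * Illustrative sketch (not part of the original file): typical driver use of
 * remap_pfn_range() from an mmap() file operation.  "mydrv_phys_base" and
 * "mydrv_region_size" are hypothetical driver-provided values; the helper
 * only shows the calling convention.  The mmap() path already holds the
 * mmap_lock for write, as the kernel-doc above requires.
 */
#if 0	/* example only */
static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long pfn = (mydrv_phys_base >> PAGE_SHIFT) + vma->vm_pgoff;

	if (vma->vm_pgoff + (size >> PAGE_SHIFT) >
	    (mydrv_region_size >> PAGE_SHIFT))
		return -EINVAL;

	/* remap_pfn_range() sets VM_IO/VM_PFNMAP/VM_DONTEXPAND itself. */
	return remap_pfn_range(vma, vma->vm_start, pfn, size,
			       vma->vm_page_prot);
}
#endif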
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387)  * vm_iomap_memory - remap memory to userspace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388)  * @vma: user vma to map to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389)  * @start: start of the physical memory to be mapped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390)  * @len: size of area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392)  * This is a simplified io_remap_pfn_range() for common driver use. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393)  * driver just needs to give us the physical memory range to be mapped,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394)  * and we'll figure out the rest from the vma information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396)  * NOTE! Some drivers might want to tweak vma->vm_page_prot first to set up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397)  * write-combining or similar attributes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399)  * Return: %0 on success, negative error code otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 	unsigned long vm_len, pfn, pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 	/* Check that the physical memory area passed in looks valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 	if (start + len < start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 	 * You *really* shouldn't map things that aren't page-aligned,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 	 * but we've historically allowed it because IO memory might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 	 * just have smaller alignment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 	len += start & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 	pfn = start >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 	pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 	if (pfn + pages < pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 	/* We start the mapping 'vm_pgoff' pages into the area */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 	if (vma->vm_pgoff > pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 	pfn += vma->vm_pgoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 	pages -= vma->vm_pgoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 	/* Can we fit all of the mapping? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 	vm_len = vma->vm_end - vma->vm_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 	if (vm_len >> PAGE_SHIFT > pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 	/* Ok, let it rip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) EXPORT_SYMBOL(vm_iomap_memory);
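
/*
 * Illustrative sketch (not part of the original file): with vm_iomap_memory()
 * the driver only supplies the raw physical range and the offset/length
 * checks against the vma are done above.  "mydrv_phys_base" and
 * "mydrv_region_size" are hypothetical.
 */
#if 0	/* example only */
static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Optionally request write-combining first, as noted above. */
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return vm_iomap_memory(vma, mydrv_phys_base, mydrv_region_size);
}
#endif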
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 				     unsigned long addr, unsigned long end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 				     pte_fn_t fn, void *data, bool create,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 				     pgtbl_mod_mask *mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 	pte_t *pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 	spinlock_t *ptl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 	if (create) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 		pte = (mm == &init_mm) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 			pte_alloc_kernel_track(pmd, addr, mask) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 			pte_alloc_map_lock(mm, pmd, addr, &ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 		if (!pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 		pte = (mm == &init_mm) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 			pte_offset_kernel(pmd, addr) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 			pte_offset_map_lock(mm, pmd, addr, &ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 	BUG_ON(pmd_huge(*pmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 	arch_enter_lazy_mmu_mode();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 	if (fn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 			if (create || !pte_none(*pte)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 				err = fn(pte++, addr, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 				if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 		} while (addr += PAGE_SIZE, addr != end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 	*mask |= PGTBL_PTE_MODIFIED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 	arch_leave_lazy_mmu_mode();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 	if (mm != &init_mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 		pte_unmap_unlock(pte-1, ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 				     unsigned long addr, unsigned long end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 				     pte_fn_t fn, void *data, bool create,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 				     pgtbl_mod_mask *mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 	pmd_t *pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 	unsigned long next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 	BUG_ON(pud_huge(*pud));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 	if (create) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 		pmd = pmd_alloc_track(mm, pud, addr, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 		if (!pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 		pmd = pmd_offset(pud, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 		next = pmd_addr_end(addr, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 		if (create || !pmd_none_or_clear_bad(pmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 			err = apply_to_pte_range(mm, pmd, addr, next, fn, data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 						 create, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 	} while (pmd++, addr = next, addr != end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 				     unsigned long addr, unsigned long end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 				     pte_fn_t fn, void *data, bool create,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 				     pgtbl_mod_mask *mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 	pud_t *pud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 	unsigned long next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	if (create) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 		pud = pud_alloc_track(mm, p4d, addr, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 		if (!pud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 		pud = pud_offset(p4d, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 		next = pud_addr_end(addr, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 		if (create || !pud_none_or_clear_bad(pud)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 			err = apply_to_pmd_range(mm, pud, addr, next, fn, data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 						 create, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 	} while (pud++, addr = next, addr != end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 				     unsigned long addr, unsigned long end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 				     pte_fn_t fn, void *data, bool create,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 				     pgtbl_mod_mask *mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 	p4d_t *p4d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 	unsigned long next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 	if (create) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 		p4d = p4d_alloc_track(mm, pgd, addr, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 		if (!p4d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 		p4d = p4d_offset(pgd, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 		next = p4d_addr_end(addr, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 		if (create || !p4d_none_or_clear_bad(p4d)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 			err = apply_to_pud_range(mm, p4d, addr, next, fn, data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 						 create, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 	} while (p4d++, addr = next, addr != end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 				 unsigned long size, pte_fn_t fn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 				 void *data, bool create)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 	pgd_t *pgd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 	unsigned long start = addr, next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 	unsigned long end = addr + size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 	pgtbl_mod_mask mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 	if (WARN_ON(addr >= end))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 	pgd = pgd_offset(mm, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 		next = pgd_addr_end(addr, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 		if (!create && pgd_none_or_clear_bad(pgd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 		err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create, &mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 	} while (pgd++, addr = next, addr != end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 		arch_sync_kernel_mappings(start, start + size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594)  * Scan a region of virtual memory, filling in page tables as necessary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595)  * and calling a provided function on each leaf page table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 			unsigned long size, pte_fn_t fn, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 	return __apply_to_page_range(mm, addr, size, fn, data, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) EXPORT_SYMBOL_GPL(apply_to_page_range);
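
/*
 * Illustrative sketch (not part of the original file): the pte_fn_t callback
 * receives each PTE slot in turn.  For a user mm it runs under the PTE lock;
 * for init_mm the caller must own the virtual range (e.g. one returned by
 * get_vm_area()).  The callback and helper below are hypothetical and only
 * demonstrate the calling convention.
 */
#if 0	/* example only */
static int install_page_pte(pte_t *pte, unsigned long addr, void *data)
{
	struct page *page = data;

	if (!pte_none(*pte))
		return -EBUSY;
	set_pte_at(&init_mm, addr, pte, mk_pte(page, PAGE_KERNEL));
	return 0;
}

static int mydrv_map_area(unsigned long addr, unsigned long size,
			  struct page *page)
{
	/* create == true: missing intermediate page tables are allocated. */
	return apply_to_page_range(&init_mm, addr, size, install_page_pte,
				   page);
}
#endif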
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) static bool pte_spinlock(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 	bool ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 	pmd_t pmdval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 	/* Check if vma is still valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 		vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 		spin_lock(vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 	local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 	if (vma_has_changed(vmf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 		trace_spf_vma_changed(_RET_IP_, vmf->vma, vmf->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 	 * We check that the pmd value is still the same to ensure that no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 	 * huge-page collapse operation is in progress behind our back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 	pmdval = READ_ONCE(*vmf->pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 	if (!pmd_same(pmdval, vmf->orig_pmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 		trace_spf_pmd_changed(_RET_IP_, vmf->vma, vmf->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 	if (unlikely(!spin_trylock(vmf->ptl))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 		trace_spf_pte_lock(_RET_IP_, vmf->vma, vmf->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 	if (vma_has_changed(vmf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 		spin_unlock(vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 		trace_spf_vma_changed(_RET_IP_, vmf->vma, vmf->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 	ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 	local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) static bool __pte_map_lock_speculative(struct vm_fault *vmf, unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 	bool ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 	pte_t *pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 	spinlock_t *ptl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 	pmd_t pmdval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 	 * The first vma_has_changed() guarantees the page tables are still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 	 * valid; having IRQs disabled ensures they stay around, hence the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 	 * second vma_has_changed() to make sure they are still valid once
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 	 * we've got the lock. After that, a concurrent zap_pte_range() will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 	 * block on the PTL and thus we're safe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 	local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 	if (vma_has_changed(vmf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 		trace_spf_vma_changed(_RET_IP_, vmf->vma, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 	 * We check that the pmd value is still the same to ensure that no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 	 * huge-page collapse operation is in progress behind our back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 	pmdval = READ_ONCE(*vmf->pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 	if (!pmd_same(pmdval, vmf->orig_pmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 		trace_spf_pmd_changed(_RET_IP_, vmf->vma, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 	 * Same as pte_offset_map_lock() except that we call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 	 * spin_trylock() in place of spin_lock() to avoid a race with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 	 * unmap path, which may hold the lock and wait for this CPU to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 	 * invalidate its TLB while this CPU has IRQs disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 	 * Since we are on the speculative path, accept that it can fail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 	ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 	pte = pte_offset_map(vmf->pmd, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 	if (unlikely(!spin_trylock(ptl))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 		pte_unmap(pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 		trace_spf_pte_lock(_RET_IP_, vmf->vma, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 	if (vma_has_changed(vmf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 		pte_unmap_unlock(pte, ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 		trace_spf_vma_changed(_RET_IP_, vmf->vma, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 	vmf->pte = pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 	vmf->ptl = ptl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 	ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 	local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) static bool pte_map_lock(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 		vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 					       vmf->address, &vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 	return __pte_map_lock_speculative(vmf, vmf->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) bool pte_map_lock_addr(struct vm_fault *vmf, unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 		vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 					       addr, &vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 	return __pte_map_lock_speculative(vmf, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) static bool __read_mostly allow_file_spec_access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) static int __init allow_file_spec_access_setup(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 	allow_file_spec_access = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) __setup("allow_file_spec_access", allow_file_spec_access_setup);
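
/*
 * Note: "allow_file_spec_access" above is an early-boot command-line switch;
 * appending it to the kernel command line makes vmf_allows_speculation()
 * below consider file-backed VMAs as well, instead of anonymous VMAs only.
 */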
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) static bool vmf_allows_speculation(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 	if (vma_is_anonymous(vmf->vma)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 		 * __anon_vma_prepare() requires the mmap_sem to be held
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 		 * because vm_next and vm_prev must be safe. This can't be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 		 * guaranteed in the speculative path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 		if (!vmf->vma->anon_vma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 			trace_spf_vma_notsup(_RET_IP_, vmf->vma, vmf->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 	if (!allow_file_spec_access) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 		 * Can't call the vm_ops callbacks, as we don't know what they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 		 * would do with the VMA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 		 * This includes huge pages from hugetlbfs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 		trace_spf_vma_notsup(_RET_IP_, vmf->vma, vmf->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 	if (!(vmf->vma->vm_flags & VM_SHARED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 		(vmf->flags & FAULT_FLAG_WRITE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 		!vmf->vma->anon_vma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 		 * non-anonymous private COW without anon_vma.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 		 * See above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 		trace_spf_vma_notsup(_RET_IP_, vmf->vma, vmf->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 	if (vmf->vma->vm_ops->allow_speculation &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 		vmf->vma->vm_ops->allow_speculation()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 	trace_spf_vma_notsup(_RET_IP_, vmf->vma, vmf->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) static inline bool pte_spinlock(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 	spin_lock(vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) static inline bool pte_map_lock(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 				       vmf->address, &vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) inline bool pte_map_lock_addr(struct vm_fault *vmf, unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 					addr, &vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) static inline bool vmf_allows_speculation(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) #endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822)  * Scan a region of virtual memory, calling a provided function on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823)  * each leaf page table where it exists.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825)  * Unlike apply_to_page_range, this does _not_ fill in page tables
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826)  * where they are absent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 				 unsigned long size, pte_fn_t fn, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 	return __apply_to_page_range(mm, addr, size, fn, data, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) EXPORT_SYMBOL_GPL(apply_to_existing_page_range);
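
/*
 * Illustrative sketch (not part of the original file): because the "create"
 * flag is false here, absent page tables are skipped instead of allocated,
 * so this variant suits read-only walks.  The counting callback and helper
 * below are hypothetical.
 */
#if 0	/* example only */
static int count_present_pte(pte_t *pte, unsigned long addr, void *data)
{
	unsigned long *count = data;

	if (pte_present(*pte))
		(*count)++;
	return 0;
}

static unsigned long count_mapped_pages(struct mm_struct *mm,
					unsigned long addr, unsigned long size)
{
	unsigned long count = 0;

	apply_to_existing_page_range(mm, addr, size, count_present_pte,
				     &count);
	return count;
}
#endif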
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836)  * handle_pte_fault chooses the page fault handler according to an entry that was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837)  * read non-atomically.  Before making any commitment, on those architectures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838)  * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839)  * parts, do_swap_page must check under lock before unmapping the pte and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840)  * proceeding (but do_wp_page is only called after already making such a check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841)  * and do_anonymous_page can safely check later on).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843)  * pte_unmap_same() returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844)  *	0			if the PTEs are the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845)  *	VM_FAULT_PTNOTSAME	if the PTEs are different
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846)  *	VM_FAULT_RETRY		if the VMA has changed behind our back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847)  *				during speculative page fault handling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) static inline int pte_unmap_same(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 	if (sizeof(pte_t) > sizeof(unsigned long)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 		if (pte_spinlock(vmf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 			if (!pte_same(*vmf->pte, vmf->orig_pte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 				ret = VM_FAULT_PTNOTSAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 			spin_unlock(vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 			ret = VM_FAULT_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 	pte_unmap(vmf->pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) static inline bool cow_user_page(struct page *dst, struct page *src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 				 struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 	bool ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 	void *kaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 	void __user *uaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 	bool locked = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 	struct vm_area_struct *vma = vmf->vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 	struct mm_struct *mm = vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 	unsigned long addr = vmf->address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 	if (likely(src)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 		copy_user_highpage(dst, src, addr, vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 	 * If the source page was a PFN mapping, we don't have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 	 * a "struct page" for it. We do a best-effort copy by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 	 * just copying from the original user address. If that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 	 * fails, we just zero-fill it. Live with it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 	kaddr = kmap_atomic(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 	uaddr = (void __user *)(addr & PAGE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 	 * On architectures with software "accessed" bits, we would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 	 * take a double page fault, so mark it accessed here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 	if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 		pte_t entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 		locked = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 			 * Another thread has already handled the fault;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 			 * just update the local TLB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 			update_mmu_tlb(vma, addr, vmf->pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 			ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 			goto pte_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 		entry = pte_mkyoung(vmf->orig_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 		if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 			update_mmu_cache(vma, addr, vmf->pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 	 * This really shouldn't fail, because the page is there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 	 * in the page tables. But it might just be unreadable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 	 * in which case we just give up and fill the result with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 	 * zeroes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 	if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 		if (locked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 			goto warn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 		/* Re-validate under PTL if the page is still mapped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 		locked = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 		/* The PTE changed under us; just update the local TLB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 			update_mmu_tlb(vma, addr, vmf->pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) 			ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 			goto pte_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) 		 * The same page may have been mapped back in since the last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 		 * copy attempt.  Try the copy again under the PTL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 			 * Warn in case some obscure use-case actually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 			 * hits this path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) warn:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 			WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 			clear_page(kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 	ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) pte_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 	if (locked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 		pte_unmap_unlock(vmf->pte, vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 	kunmap_atomic(kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 	flush_dcache_page(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 	struct file *vm_file = vma->vm_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 	if (vm_file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 		return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 	 * Special mappings (e.g. VDSO) do not have any file so fake
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 	 * a default GFP_KERNEL for them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 	return GFP_KERNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977)  * Notify the address space that the page is about to become writable so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978)  * it can prohibit this or wait for the page to get into an appropriate state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980)  * We do this without the lock held, so that it can sleep if it needs to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) static vm_fault_t do_page_mkwrite(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 	vm_fault_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 	struct page *page = vmf->page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 	unsigned int old_flags = vmf->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) 	vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) 	if (vmf->vma->vm_file &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) 	    IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) 		return VM_FAULT_SIGBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 	ret = vmf->vma->vm_ops->page_mkwrite(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) 	/* Restore original flags so that caller is not surprised */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) 	vmf->flags = old_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) 	if (unlikely(!(ret & VM_FAULT_LOCKED))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) 		lock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 		if (!page->mapping) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) 			unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 			return 0; /* retry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 		ret |= VM_FAULT_LOCKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) 		VM_BUG_ON_PAGE(!PageLocked(page), page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) }
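
/*
 * Illustrative sketch (not part of the original file): a minimal
 * ->page_mkwrite() handler following the protocol expected above.  It either
 * returns VM_FAULT_LOCKED with the page still locked, or VM_FAULT_NOPAGE when
 * it lost a race with truncation.  Names are hypothetical.
 */
#if 0	/* example only */
static vm_fault_t mydrv_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	if (page->mapping != file_inode(vmf->vma->vm_file)->i_mapping) {
		/* Raced with truncate: have the caller retry the fault. */
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}
	set_page_dirty(page);
	return VM_FAULT_LOCKED;
}
#endif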
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012)  * Handle dirtying of a page in shared file mapping on a write fault.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014)  * The function expects the page to be locked and unlocks it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 	struct vm_area_struct *vma = vmf->vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 	struct address_space *mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 	struct page *page = vmf->page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) 	bool dirtied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 	bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 	dirtied = set_page_dirty(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 	VM_BUG_ON_PAGE(PageAnon(page), page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) 	 * Take a local copy of the address_space - page.mapping may be zeroed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) 	 * by truncate after unlock_page().   The address_space itself remains
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 	 * pinned by vma->vm_file's reference.  We rely on unlock_page()'s
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) 	 * release semantics to prevent the compiler from undoing this copying.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 	mapping = page_rmapping(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 	unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 	if (!page_mkwrite)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) 		file_update_time(vma->vm_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) 	 * Throttle page dirtying rate down to writeback speed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 	 * mapping may be NULL here because some device drivers do not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 	 * set page.mapping but still dirty their pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 	 * Drop the mmap_lock before waiting on IO, if we can. The file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) 	 * is pinning the mapping, as per above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) 	if ((dirtied || page_mkwrite) && mapping) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) 		struct file *fpin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) 		fpin = maybe_unlock_mmap_for_io(vmf, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 		balance_dirty_pages_ratelimited(mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) 		if (fpin) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) 			fput(fpin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) 			return VM_FAULT_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062)  * Handle write page faults for pages that can be reused in the current vma
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064)  * This can happen either because the mapping has the VM_SHARED flag set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065)  * or because we hold the last remaining reference to the page. In either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066)  * case, all we need to do here is to mark the page as writable and update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067)  * any related book-keeping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) static inline void wp_page_reuse(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) 	__releases(vmf->ptl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) 	struct vm_area_struct *vma = vmf->vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 	struct page *page = vmf->page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 	pte_t entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 	 * Clear the page's cpupid information as the existing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 	 * information potentially belongs to a now completely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 	 * unrelated process.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 	if (page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 		page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 	flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 	entry = pte_mkyoung(vmf->orig_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 	entry = maybe_mkwrite(pte_mkdirty(entry), vmf->vma_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 	if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 		update_mmu_cache(vma, vmf->address, vmf->pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 	pte_unmap_unlock(vmf->pte, vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) 	count_vm_event(PGREUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093)  * Handle the case of a page which we actually need to copy to a new page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095)  * Called with mmap_lock locked and the old page referenced, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096)  * without the ptl held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098)  * High level logic flow:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100)  * - Allocate a page, copy the content of the old page to the new one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101)  * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102)  * - Take the PTL. If the pte changed, bail out and release the allocated page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103)  * - If the pte is still the way we remember it, update the page table and all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104)  *   relevant references. This includes dropping the reference the page-table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105)  *   held to the old page, as well as updating the rmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106)  * - In any case, unlock the PTL and drop the reference we took to the old page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) static vm_fault_t wp_page_copy(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 	struct vm_area_struct *vma = vmf->vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) 	struct mm_struct *mm = vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 	struct page *old_page = vmf->page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 	struct page *new_page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) 	pte_t entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 	int page_copied = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) 	struct mmu_notifier_range range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) 	vm_fault_t ret = VM_FAULT_OOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) 	if (unlikely(anon_vma_prepare(vma)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) 
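	/*
	 * A write fault on the zero page has nothing to copy, so just hand
	 * back a freshly zeroed page; otherwise allocate a page and copy the
	 * old contents into it below.
	 */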
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) 	if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 		new_page = alloc_zeroed_user_highpage_movable(vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 							      vmf->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) 		if (!new_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) 		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) 				vmf->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 		if (!new_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) 		if (!cow_user_page(new_page, old_page, vmf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 			 * COW failed; if the fault was resolved by another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 			 * thread, that's fine. If not, userspace will re-fault
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 			 * on the same address and we will handle the fault
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 			 * on the second attempt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 			put_page(new_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 			if (old_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 				put_page(old_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 		trace_android_vh_cow_user_page(vmf, new_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 	if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 		goto out_free_new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 	cgroup_throttle_swaprate(new_page, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 	__SetPageUptodate(new_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 				vmf->address & PAGE_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 				(vmf->address & PAGE_MASK) + PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 	mmu_notifier_invalidate_range_start(&range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 	 * Re-check the pte - we dropped the lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 	if (!pte_map_lock(vmf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 		ret = VM_FAULT_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 		goto out_invalidate_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) 	if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 		if (old_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) 			if (!PageAnon(old_page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) 				dec_mm_counter_fast(mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) 						mm_counter_file(old_page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) 				inc_mm_counter_fast(mm, MM_ANONPAGES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 			inc_mm_counter_fast(mm, MM_ANONPAGES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 		entry = mk_pte(new_page, vmf->vma_page_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 		entry = pte_sw_mkyoung(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) 		entry = maybe_mkwrite(pte_mkdirty(entry), vmf->vma_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 		 * Clear the pte entry and flush it first, before updating the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 		 * pte with the new entry. This will avoid a race condition
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 		 * seen in the presence of one thread doing SMC and another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) 		 * thread doing COW.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) 		ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) 		__page_add_new_anon_rmap(new_page, vma, vmf->address, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) 		__lru_cache_add_inactive_or_unevictable(new_page, vmf->vma_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 		 * We call the notify macro here because, when using secondary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) 		 * mmu page tables (such as kvm shadow page tables), we want the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) 		 * new page to be mapped directly into the secondary page table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 		set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) 		update_mmu_cache(vma, vmf->address, vmf->pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 		if (old_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) 			 * Only after switching the pte to the new page may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) 			 * we remove the mapcount here. Otherwise another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) 			 * process may come and find the rmap count decremented
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) 			 * before the pte is switched to the new page, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) 			 * "reuse" the old page writing into it while our pte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 			 * here still points into it and can be read by other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 			 * threads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 			 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) 			 * The critical issue is to order this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) 			 * page_remove_rmap with the ptep_clear_flush above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 			 * Those stores are ordered by (if nothing else,)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 			 * the barrier present in the atomic_add_negative
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) 			 * in page_remove_rmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) 			 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) 			 * Then the TLB flush in ptep_clear_flush ensures that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) 			 * no process can access the old page before the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) 			 * decremented mapcount is visible. And the old page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) 			 * cannot be reused until after the decremented
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) 			 * mapcount is visible. So transitively, TLBs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 			 * old page will be flushed before it can be reused.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) 			page_remove_rmap(old_page, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 		/* Free the old page.. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 		new_page = old_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 		page_copied = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) 		update_mmu_tlb(vma, vmf->address, vmf->pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) 	if (new_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 		put_page(new_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) 	pte_unmap_unlock(vmf->pte, vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) 	 * No need to double call mmu_notifier->invalidate_range() callback as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) 	 * the above ptep_clear_flush_notify() did already call it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) 	mmu_notifier_invalidate_range_only_end(&range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) 	if (old_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) 		 * Don't let another task, with possibly unlocked vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) 		 * keep the mlocked page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 		if (page_copied && (vmf->vma_flags & VM_LOCKED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) 			lock_page(old_page);	/* LRU manipulation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 			if (PageMlocked(old_page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 				munlock_vma_page(old_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 			unlock_page(old_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) 		put_page(old_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) 	return page_copied ? VM_FAULT_WRITE : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) out_invalidate_end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) 	mmu_notifier_invalidate_range_only_end(&range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) out_free_new:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) 	put_page(new_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) 	if (old_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) 		put_page(old_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263)  * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264)  *			  writeable once the page is prepared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266)  * @vmf: structure describing the fault
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268)  * This function handles all that is needed to finish a write page fault in a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269)  * shared mapping due to PTE being read-only once the mapped page is prepared.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270)  * It handles locking of PTE and modifying it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272)  * The function expects the page to be locked or other protection against
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273)  * concurrent faults / writeback (such as DAX radix tree locks).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275)  * Return: %0 on success, %VM_FAULT_NOPAGE when the PTE changed before we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276)  * acquired the PTE lock, or %VM_FAULT_RETRY if the PTE could not be locked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) 	WARN_ON_ONCE(!(vmf->vma_flags & VM_SHARED));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) 	if (!pte_map_lock(vmf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) 		return VM_FAULT_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) 	 * We might have raced with another page fault while we released the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) 	 * pte_offset_map_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) 	if (!pte_same(*vmf->pte, vmf->orig_pte)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) 		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) 		pte_unmap_unlock(vmf->pte, vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) 		return VM_FAULT_NOPAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) 	wp_page_reuse(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297)  * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298)  * mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) 	struct vm_area_struct *vma = vmf->vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) 	if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) 		vm_fault_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) 		pte_unmap_unlock(vmf->pte, vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) 		vmf->flags |= FAULT_FLAG_MKWRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) 		ret = vma->vm_ops->pfn_mkwrite(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) 		if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) 		return finish_mkwrite_fault(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) 	wp_page_reuse(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) 	return VM_FAULT_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) 
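/*
 * Handle a write fault on a page belonging to a shared, writable file
 * mapping: give the filesystem a chance to prepare the page through
 * ->page_mkwrite() if it provides one, make the PTE writable, and then
 * dirty the page and throttle writeback via fault_dirty_shared_page().
 */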
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) static vm_fault_t wp_page_shared(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) 	__releases(vmf->ptl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) 	struct vm_area_struct *vma = vmf->vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) 	vm_fault_t ret = VM_FAULT_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) 	get_page(vmf->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) 	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) 		vm_fault_t tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) 		pte_unmap_unlock(vmf->pte, vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) 		tmp = do_page_mkwrite(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) 		if (unlikely(!tmp || (tmp &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) 				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) 			put_page(vmf->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) 			return tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) 		tmp = finish_mkwrite_fault(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) 		if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) 			unlock_page(vmf->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) 			put_page(vmf->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) 			return tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) 		wp_page_reuse(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) 		lock_page(vmf->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) 	ret |= fault_dirty_shared_page(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) 	put_page(vmf->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353)  * This routine handles present pages, when users try to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354)  * to a shared page. It is done by copying the page to a new address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355)  * and decrementing the shared-page counter for the old page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357)  * Note that this routine assumes that the protection checks have been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358)  * done by the caller (the low-level page fault routine in most cases).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359)  * Thus we can safely just mark it writable once we've done any necessary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360)  * COW.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362)  * We also mark the page dirty at this point even though the page will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363)  * change only once the write actually happens. This avoids a few races,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364)  * and potentially makes it more efficient.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366)  * We enter with non-exclusive mmap_lock (to exclude vma changes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367)  * but allow concurrent faults), with pte both mapped and locked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368)  * We return with mmap_lock still held, but pte unmapped and unlocked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) static vm_fault_t do_wp_page(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) 	__releases(vmf->ptl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) 	struct vm_area_struct *vma = vmf->vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) 	if (userfaultfd_pte_wp(vma, *vmf->pte)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) 		pte_unmap_unlock(vmf->pte, vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) 		return handle_userfault(vmf, VM_UFFD_WP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) 	 * Userfaultfd write-protect can defer flushes. Ensure the TLB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) 	 * is flushed in this case before copying.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) 	if (unlikely(userfaultfd_wp(vmf->vma) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) 		     mm_tlb_flush_pending(vmf->vma->vm_mm)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) 		flush_tlb_page(vmf->vma, vmf->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) 	vmf->page = _vm_normal_page(vma, vmf->address, vmf->orig_pte,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) 					vmf->vma_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) 	if (!vmf->page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) 		 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) 		 * VM_PFNMAP VMA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) 		 * We should not cow pages in a shared writeable mapping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) 		 * Just mark the pages writable and/or call ops->pfn_mkwrite.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) 		if ((vmf->vma_flags & (VM_WRITE|VM_SHARED)) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) 				     (VM_WRITE|VM_SHARED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) 			return wp_pfn_shared(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) 		pte_unmap_unlock(vmf->pte, vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) 		return wp_page_copy(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) 	 * Take out anonymous pages first: anonymous shared vmas are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) 	 * not dirty accountable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) 	if (PageAnon(vmf->page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) 		struct page *page = vmf->page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) 		/* PageKsm() doesn't necessarily raise the page refcount */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) 		if (PageKsm(page) || page_count(page) != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) 			goto copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) 		if (!trylock_page(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) 			goto copy;
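		/*
		 * Re-check under the page lock: the unlocked test above can
		 * race with new references or mappings being taken before we
		 * got the lock.
		 */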
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) 		if (PageKsm(page) || page_mapcount(page) != 1 || page_count(page) != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) 			unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) 			goto copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) 		 * Ok, we've got the only map reference, and the only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) 		 * page count reference, and the page is locked,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) 		 * it's dark out, and we're wearing sunglasses. Hit it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) 		unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) 		wp_page_reuse(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) 		return VM_FAULT_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) 	} else if (unlikely((vmf->vma_flags & (VM_WRITE|VM_SHARED)) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) 					(VM_WRITE|VM_SHARED))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) 		return wp_page_shared(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) copy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) 	 * Ok, we need to copy. Oh, well..
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) 	get_page(vmf->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) 	pte_unmap_unlock(vmf->pte, vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) 	return wp_page_copy(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) static void unmap_mapping_range_vma(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) 		unsigned long start_addr, unsigned long end_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) 		struct zap_details *details)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) 	zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) 
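/*
 * Walk every VMA in the interval tree that overlaps the requested range of
 * page offsets and zap the overlapping portion.  vba/vea are the VMA's first
 * and last page offsets within the file; zba/zea are those offsets clamped to
 * [first_index, last_index] before being converted back to virtual addresses
 * for unmap_mapping_range_vma().
 */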
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) 					    struct zap_details *details)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) 	struct vm_area_struct *vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) 	pgoff_t vba, vea, zba, zea;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) 	vma_interval_tree_foreach(vma, root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) 			details->first_index, details->last_index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) 		vba = vma->vm_pgoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) 		vea = vba + vma_pages(vma) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) 		zba = details->first_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) 		if (zba < vba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) 			zba = vba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) 		zea = details->last_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) 		if (zea > vea)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) 			zea = vea;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) 		unmap_mapping_range_vma(vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) 			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) 			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) 				details);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477)  * unmap_mapping_page() - Unmap single page from processes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478)  * @page: The locked page to be unmapped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480)  * Unmap this page from any userspace process which still has it mmaped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481)  * Typically, for efficiency, the range of nearby pages has already been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482)  * unmapped by unmap_mapping_pages() or unmap_mapping_range().  But once
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483)  * truncation or invalidation holds the lock on a page, it may find that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484)  * the page has been remapped again; it then uses unmap_mapping_page()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485)  * to finally unmap it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) void unmap_mapping_page(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) 	struct address_space *mapping = page->mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) 	struct zap_details details = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) 	VM_BUG_ON(!PageLocked(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) 	VM_BUG_ON(PageTail(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) 	details.check_mapping = mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) 	details.first_index = page->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) 	details.last_index = page->index + thp_nr_pages(page) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) 	details.single_page = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) 	i_mmap_lock_write(mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) 	i_mmap_unlock_write(mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507)  * unmap_mapping_pages() - Unmap pages from processes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508)  * @mapping: The address space containing pages to be unmapped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509)  * @start: Index of first page to be unmapped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510)  * @nr: Number of pages to be unmapped.  0 to unmap to end of file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511)  * @even_cows: Whether to unmap even private COWed pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513)  * Unmap the pages in this address space from any userspace process which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514)  * has them mmaped.  Generally, you want to remove COWed pages as well when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515)  * a file is being truncated, but not when invalidating pages from the page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516)  * cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) 		pgoff_t nr, bool even_cows)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) 	struct zap_details details = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) 	details.check_mapping = even_cows ? NULL : mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) 	details.first_index = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 	details.last_index = start + nr - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) 	if (details.last_index < details.first_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) 		details.last_index = ULONG_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) 	i_mmap_lock_write(mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) 	i_mmap_unlock_write(mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536)  * unmap_mapping_range - unmap the portion of all mmaps in the specified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537)  * address_space corresponding to the specified byte range in the underlying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538)  * file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540)  * @mapping: the address space containing mmaps to be unmapped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541)  * @holebegin: byte in first page to unmap, relative to the start of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542)  * the underlying file.  This will be rounded down to a PAGE_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543)  * boundary.  Note that this is different from truncate_pagecache(), which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544)  * must keep the partial page.  In contrast, we must get rid of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545)  * partial pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546)  * @holelen: size of prospective hole in bytes.  This will be rounded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547)  * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548)  * end of the file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549)  * @even_cows: 1 when truncating a file, unmap even private COWed pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550)  * but 0 when invalidating pagecache, don't throw away private data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) void unmap_mapping_range(struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) 		loff_t const holebegin, loff_t const holelen, int even_cows)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) 	pgoff_t hba = holebegin >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) 	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) 	/* Check for overflow. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) 	if (sizeof(holelen) > sizeof(hlen)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) 		long long holeend =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) 			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) 		if (holeend & ~(long long)ULONG_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) 			hlen = ULONG_MAX - hba + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) 	unmap_mapping_pages(mapping, hba, hlen, even_cows);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) EXPORT_SYMBOL(unmap_mapping_range);
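
/*
 * Usage sketch (illustrative; the callers live elsewhere): when a file is
 * truncated to 'newsize', the truncate path (e.g. truncate_pagecache())
 * unmaps everything beyond the new end of file with
 *
 *	unmap_mapping_range(mapping, round_up(newsize, PAGE_SIZE), 0, 1);
 *
 * passing holelen == 0 to reach the end of the file and even_cows == 1 so
 * that private COWed copies of the truncated pages are dropped as well.
 */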
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571)  * We enter with non-exclusive mmap_lock (to exclude vma changes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572)  * but allow concurrent faults), and pte mapped but not yet locked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573)  * We return with pte unmapped and unlocked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575)  * We return with the mmap_lock locked or unlocked in the same cases
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576)  * as does filemap_fault().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) vm_fault_t do_swap_page(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) 	struct vm_area_struct *vma = vmf->vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) 	struct page *page = NULL, *swapcache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) 	swp_entry_t entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) 	pte_t pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) 	int locked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) 	int exclusive = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) 	vm_fault_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) 	void *shadow = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) 	ret = pte_unmap_same(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) 		 * If pte != orig_pte, this means another thread did the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) 		 * swap operation behind our back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) 		 * So nothing else to do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) 		if (ret == VM_FAULT_PTNOTSAME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) 	entry = pte_to_swp_entry(vmf->orig_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) 	if (unlikely(non_swap_entry(entry))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) 		if (is_migration_entry(entry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) 			migration_entry_wait(vma->vm_mm, vmf->pmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) 					     vmf->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) 		} else if (is_device_private_entry(entry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) 			vmf->page = device_private_entry_to_page(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) 			ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) 		} else if (is_hwpoison_entry(entry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) 			ret = VM_FAULT_HWPOISON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) 			print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) 			ret = VM_FAULT_SIGBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) 	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) 	page = lookup_swap_cache(entry, vma, vmf->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) 	swapcache = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) 	if (!page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) 		struct swap_info_struct *si = swp_swap_info(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) 		if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) 		    __swap_count(entry) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) 			/* skip swapcache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) 			gfp_t flags = GFP_HIGHUSER_MOVABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) 			trace_android_rvh_set_skip_swapcache_flags(&flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) 			page = alloc_page_vma(flags, vma, vmf->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) 			if (page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) 				int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) 				__SetPageLocked(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) 				__SetPageSwapBacked(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) 				set_page_private(page, entry.val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) 				/* Tell memcg to use swap ownership records */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) 				SetPageSwapCache(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) 				err = mem_cgroup_charge(page, vma->vm_mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) 							GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) 				ClearPageSwapCache(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) 				if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) 					ret = VM_FAULT_OOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) 					goto out_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) 				shadow = get_shadow_from_swap_cache(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) 				if (shadow)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) 					workingset_refault(page, shadow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) 				lru_cache_add(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) 				swap_readpage(page, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) 		} else if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) 			 * Don't try readahead during a speculative page fault
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) 			 * as the VMA's boundaries may change behind our back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) 			 * If the page is not in the swap cache and synchronous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) 			 * read is disabled, fall back to the regular page fault
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) 			 * mechanism.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) 			delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) 			ret = VM_FAULT_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) 			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) 						vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) 			swapcache = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) 		if (!page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) 			 * Back out if the VMA has changed behind our back during
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) 			 * a speculative page fault or if somebody else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) 			 * faulted in this pte while we released the pte lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) 			if (!pte_map_lock(vmf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) 				delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) 				ret = VM_FAULT_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) 			if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) 				ret = VM_FAULT_OOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) 			delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) 			goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) 		/* Had to read the page from swap area: Major fault */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) 		ret = VM_FAULT_MAJOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) 		count_vm_event(PGMAJFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) 		count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) 	} else if (PageHWPoison(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) 		 * hwpoisoned dirty swapcache pages are kept for killing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) 		 * owner processes (which may be unknown at hwpoison time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) 		ret = VM_FAULT_HWPOISON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) 		delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) 		goto out_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) 	locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) 	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) 	if (!locked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) 		ret |= VM_FAULT_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) 		goto out_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) 	 * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) 	 * release the swapcache from under us.  The page pin, and pte_same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) 	 * test below, are not enough to exclude that.  Even if it is still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) 	 * swapcache, we need to check that the page's swap has not changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) 	if (unlikely((!PageSwapCache(page) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) 			page_private(page) != entry.val)) && swapcache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) 		goto out_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) 	page = ksm_might_need_to_copy(page, vma, vmf->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) 	if (unlikely(!page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) 		ret = VM_FAULT_OOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) 		page = swapcache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) 		goto out_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) 	cgroup_throttle_swaprate(page, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) 	 * Back out if the VMA has changed behind our back during a speculative
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) 	 * page fault or if somebody else already faulted in this pte.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) 	if (!pte_map_lock(vmf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) 		ret = VM_FAULT_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) 		goto out_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) 	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) 		goto out_nomap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) 	if (unlikely(!PageUptodate(page))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) 		ret = VM_FAULT_SIGBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) 		goto out_nomap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) 	 * The page isn't present yet, go ahead with the fault.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) 	 * Be careful about the sequence of operations here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) 	 * To get its accounting right, reuse_swap_page() must be called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) 	 * while the page is counted on swap but not yet in mapcount i.e.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) 	 * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) 	 * must be called after the swap_free(), or it will never succeed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) 	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) 	dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) 	pte = mk_pte(page, vmf->vma_page_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) 	if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) 		pte = maybe_mkwrite(pte_mkdirty(pte), vmf->vma_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) 		vmf->flags &= ~FAULT_FLAG_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) 		ret |= VM_FAULT_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) 		exclusive = RMAP_EXCLUSIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) 	flush_icache_page(vma, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) 	if (pte_swp_soft_dirty(vmf->orig_pte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) 		pte = pte_mksoft_dirty(pte);
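	/*
	 * The entry was write-protected by userfaultfd before it was swapped
	 * out: restore the uffd-wp bit and keep the pte write-protected so the
	 * next write is reported to userspace rather than succeeding silently.
	 */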
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) 	if (pte_swp_uffd_wp(vmf->orig_pte)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) 		pte = pte_mkuffd_wp(pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) 		pte = pte_wrprotect(pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) 	arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) 	vmf->orig_pte = pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) 	/* ksm created a completely new copy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) 	if (unlikely(page != swapcache && swapcache)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) 		__page_add_new_anon_rmap(page, vma, vmf->address, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) 		__lru_cache_add_inactive_or_unevictable(page, vmf->vma_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) 		do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) 	trace_android_vh_swapin_add_anon_rmap(vmf, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) 	swap_free(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) 	if (mem_cgroup_swap_full(page) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) 	    (vmf->vma_flags & VM_LOCKED) || PageMlocked(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) 		try_to_free_swap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) 	unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) 	if (page != swapcache && swapcache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) 		 * Hold the lock to prevent the swap entry from being reused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) 		 * until we take the PT lock for the pte_same() check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) 		 * (to avoid false positives from pte_same). For
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) 		 * further safety, release the lock after the swap_free()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) 		 * so that the swap count won't change under a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) 		 * parallel locked swapcache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) 		unlock_page(swapcache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) 		put_page(swapcache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) 	if (vmf->flags & FAULT_FLAG_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) 		ret |= do_wp_page(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) 		if (ret & VM_FAULT_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) 			ret &= VM_FAULT_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) 	/* No need to invalidate - it was non-present before */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) 	update_mmu_cache(vma, vmf->address, vmf->pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) 	pte_unmap_unlock(vmf->pte, vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) out_nomap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) 	pte_unmap_unlock(vmf->pte, vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) out_page:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) 	unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) out_release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) 	put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) 	if (page != swapcache && swapcache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) 		unlock_page(swapcache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) 		put_page(swapcache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833)  * We enter with non-exclusive mmap_lock (to exclude vma changes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834)  * but allow concurrent faults), and pte mapped but not yet locked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835)  * We return with mmap_lock still held, but pte unmapped and unlocked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) 	struct vm_area_struct *vma = vmf->vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) 	vm_fault_t ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) 	pte_t entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) 	/* File mapping without ->vm_ops ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) 	if (vmf->vma_flags & VM_SHARED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) 		return VM_FAULT_SIGBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) 	 * Use pte_alloc() instead of pte_alloc_map().  We can't run
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) 	 * pte_offset_map() on pmds where a huge pmd might be created
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) 	 * from a different thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) 	 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) 	 * parallel threads are excluded by other means.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) 	 * Here we only have mmap_read_lock(mm).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) 	if (pte_alloc(vma->vm_mm, vmf->pmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) 		return VM_FAULT_OOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) 	/* See comment in handle_pte_fault() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) 	if (unlikely(pmd_trans_unstable(vmf->pmd)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) 	/* Use the zero-page for reads */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) 	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) 			!mm_forbids_zeropage(vma->vm_mm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) 						vmf->vma_page_prot));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) 		if (!pte_map_lock(vmf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) 			return VM_FAULT_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) 		if (!pte_none(*vmf->pte)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) 			update_mmu_tlb(vma, vmf->address, vmf->pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) 			goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) 		ret = check_stable_address_space(vma->vm_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) 			goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) 		 * Don't call the userfaultfd during the speculative path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) 		 * We already checked that the VMA is not managed through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) 		 * userfaultfd, but it may have been set behind our back once we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) 		 * have locked the pte. In such a case we can ignore it this time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) 		if (vmf->flags & FAULT_FLAG_SPECULATIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) 			goto setpte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) 		/* Deliver the page fault to userland, check inside PT lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) 		if (userfaultfd_missing(vma)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) 			pte_unmap_unlock(vmf->pte, vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) 			return handle_userfault(vmf, VM_UFFD_MISSING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) 		goto setpte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) 	/* Allocate our own private page. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) 	if (unlikely(anon_vma_prepare(vma)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) 		goto oom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) 	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) 	if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) 		goto oom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) 	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) 		goto oom_free_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) 	cgroup_throttle_swaprate(page, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) 	 * The memory barrier inside __SetPageUptodate makes sure that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) 	 * preceding stores to the page contents become visible before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) 	 * the set_pte_at() write.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) 	__SetPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) 	entry = mk_pte(page, vmf->vma_page_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) 	entry = pte_sw_mkyoung(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) 	if (vmf->vma_flags & VM_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) 		entry = pte_mkwrite(pte_mkdirty(entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) 	if (!pte_map_lock(vmf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) 		ret = VM_FAULT_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) 		goto release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) 	if (!pte_none(*vmf->pte)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) 		update_mmu_cache(vma, vmf->address, vmf->pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) 		goto unlock_and_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) 	ret = check_stable_address_space(vma->vm_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) 		goto unlock_and_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) 	/* Deliver the page fault to userland, check inside PT lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) 	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) 				userfaultfd_missing(vma)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) 		pte_unmap_unlock(vmf->pte, vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) 		put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) 		return handle_userfault(vmf, VM_UFFD_MISSING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) 	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) 	__page_add_new_anon_rmap(page, vma, vmf->address, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) 	__lru_cache_add_inactive_or_unevictable(page, vmf->vma_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) setpte:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) 	/* No need to invalidate - it was non-present before */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) 	update_mmu_cache(vma, vmf->address, vmf->pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) 	pte_unmap_unlock(vmf->pte, vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) unlock_and_release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) 	pte_unmap_unlock(vmf->pte, vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) 	put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) oom_free_page:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) 	put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) oom:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) 	return VM_FAULT_OOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963)  * The mmap_lock must have been held on entry, and may have been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964)  * released depending on flags and vma->vm_ops->fault() return value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965)  * See filemap_fault() and __lock_page_or_retry().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) static vm_fault_t __do_fault(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) 	struct vm_area_struct *vma = vmf->vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) 	vm_fault_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) 	 * Preallocate pte before we take page_lock because this might lead to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) 	 * deadlocks for memcg reclaim which waits for pages under writeback:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) 	 *				lock_page(A)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) 	 *				SetPageWriteback(A)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) 	 *				unlock_page(A)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) 	 * lock_page(B)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) 	 *				lock_page(B)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) 	 * pte_alloc_one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) 	 *   shrink_page_list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) 	 *     wait_on_page_writeback(A)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) 	 *				SetPageWriteback(B)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) 	 *				unlock_page(B)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) 	 *				# flush A, B to clear the writeback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) 	if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) 		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) 		if (!vmf->prealloc_pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) 			return VM_FAULT_OOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) 		smp_wmb(); /* See comment in __pte_alloc() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) 	ret = vma->vm_ops->fault(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) 			    VM_FAULT_DONE_COW)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) 	if (unlikely(PageHWPoison(vmf->page))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) 		struct page *page = vmf->page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) 		vm_fault_t poisonret = VM_FAULT_HWPOISON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) 		if (ret & VM_FAULT_LOCKED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) 			if (page_mapped(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) 				unmap_mapping_pages(page_mapping(page),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) 						    page->index, 1, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) 			/* Retry if a clean page was removed from the cache. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) 			if (invalidate_inode_page(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) 				poisonret = VM_FAULT_NOPAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) 			unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) 		put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) 		vmf->page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) 		return poisonret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) 	if (unlikely(!(ret & VM_FAULT_LOCKED)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) 		lock_page(vmf->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) 		VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) static void deposit_prealloc_pte(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) 	struct vm_area_struct *vma = vmf->vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) 	pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) 	 * We are going to consume the prealloc table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) 	 * count that as nr_ptes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) 	mm_inc_nr_ptes(vma->vm_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) 	vmf->prealloc_pte = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) 
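/*
 * do_set_pmd() tries to map a PMD-sized compound page (a pagecache THP)
 * with a single huge PMD entry instead of HPAGE_PMD_NR individual PTEs.
 * The VMA must be able to hold a huge mapping at the faulting address and
 * the PMD slot must still be empty; otherwise VM_FAULT_FALLBACK is
 * returned and the caller maps the page with normal PTEs.
 */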
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) 	struct vm_area_struct *vma = vmf->vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) 	bool write = vmf->flags & FAULT_FLAG_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) 	pmd_t entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) 	vm_fault_t ret = VM_FAULT_FALLBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) 	if (!transhuge_vma_suitable(vma, haddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) 	page = compound_head(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) 	if (compound_order(page) != HPAGE_PMD_ORDER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) 	 * Archs like ppc64 need additional space to store information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) 	 * related to the pte entry. Use the preallocated table for that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) 	if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) 		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) 		if (!vmf->prealloc_pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) 			return VM_FAULT_OOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) 		smp_wmb(); /* See comment in __pte_alloc() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) 	if (unlikely(!pmd_none(*vmf->pmd)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) 	for (i = 0; i < HPAGE_PMD_NR; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) 		flush_icache_page(vma, page + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) 	entry = mk_huge_pmd(page, vmf->vma_page_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) 	if (write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) 	add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) 	page_add_file_rmap(page, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) 	 * deposit and withdraw with pmd lock held
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) 	if (arch_needs_pgtable_deposit())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) 		deposit_prealloc_pte(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) 	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) 	update_mmu_cache_pmd(vma, haddr, vmf->pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) 	/* fault is handled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) 	count_vm_event(THP_FILE_MAPPED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) 	spin_unlock(vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) 	return VM_FAULT_FALLBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) 
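/*
 * do_set_pte() builds the PTE for a page that a fault handler has
 * prepared and writes it into the page table.  The caller holds the PTE
 * lock and has checked that the slot is still pte_none().  Private COW
 * pages are accounted as anonymous and put on the LRU, file pages get
 * file rmap accounting, and prefaulted pages (addr != vmf->address) may
 * be mapped as "old" on architectures that prefer that.
 */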
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) 	struct vm_area_struct *vma = vmf->vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) 	bool write = vmf->flags & FAULT_FLAG_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) 	bool prefault = vmf->address != addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) 	pte_t entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) 	flush_icache_page(vma, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) 	entry = mk_pte(page, vmf->vma_page_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) 	if (prefault && arch_wants_old_prefaulted_pte())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) 		entry = pte_mkold(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) 		entry = pte_sw_mkyoung(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) 	if (write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) 		entry = maybe_mkwrite(pte_mkdirty(entry), vmf->vma_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) 	/* copy-on-write page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) 	if (write && !(vmf->vma_flags & VM_SHARED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) 		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) 		__page_add_new_anon_rmap(page, vma, addr, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) 		__lru_cache_add_inactive_or_unevictable(page, vmf->vma_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) 		inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) 		page_add_file_rmap(page, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) 	set_pte_at(vma->vm_mm, addr, vmf->pte, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132)  * finish_fault - finish page fault once we have prepared the page to fault
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134)  * @vmf: structure describing the fault
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136)  * This function handles all that is needed to finish a page fault once the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137)  * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138)  * given page, adds reverse page mapping, handles memcg charges and LRU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139)  * addition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141)  * The function expects the page to be locked and on success it consumes a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142)  * reference of a page being mapped (for the PTE which maps it).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144)  * Return: %0 on success, %VM_FAULT_ code in case of error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) vm_fault_t finish_fault(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) 	struct vm_area_struct *vma = vmf->vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) 	vm_fault_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) 	/* Did we COW the page? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) 	if ((vmf->flags & FAULT_FLAG_WRITE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) 	    !(vmf->vma_flags & VM_SHARED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) 		page = vmf->cow_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) 		page = vmf->page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) 	 * check even for read faults because we might have lost our CoWed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) 	 * page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) 	if (!(vma->vm_flags & VM_SHARED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) 		ret = check_stable_address_space(vma->vm_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) 	if (pmd_none(*vmf->pmd) && !(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) 		if (PageTransCompound(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) 			ret = do_set_pmd(vmf, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) 			if (ret != VM_FAULT_FALLBACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) 		if (vmf->prealloc_pte) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) 			vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) 			if (likely(pmd_none(*vmf->pmd))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) 				mm_inc_nr_ptes(vma->vm_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) 				pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) 				vmf->prealloc_pte = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) 			spin_unlock(vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) 		} else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) 			return VM_FAULT_OOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) 	/* See comment in handle_pte_fault() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) 	if (pmd_devmap_trans_unstable(vmf->pmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) 	if (!pte_map_lock(vmf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) 		return VM_FAULT_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) 	/* Re-check under ptl */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) 	if (likely(pte_none(*vmf->pte)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) 		do_set_pte(vmf, page, vmf->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) 		ret = VM_FAULT_NOPAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) 	update_mmu_tlb(vma, vmf->address, vmf->pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) 	pte_unmap_unlock(vmf->pte, vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) static unsigned long fault_around_bytes __read_mostly =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) 	rounddown_pow_of_two(65536);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) #ifdef CONFIG_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) static int fault_around_bytes_get(void *data, u64 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) 	*val = fault_around_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219)  * fault_around_bytes must be rounded down to the nearest page order as it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220)  * what do_fault_around() expects to see.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) static int fault_around_bytes_set(void *data, u64 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) 	if (val / PAGE_SIZE > PTRS_PER_PTE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) 	if (val > PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) 		fault_around_bytes = rounddown_pow_of_two(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) 		fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) }
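/*
 * Illustrative examples of the clamping above, assuming 4 KiB pages and
 * 512 PTEs per page table (so the upper bound is 512 * 4096 = 2 MiB):
 *
 *   echo 70000   > /sys/kernel/debug/fault_around_bytes    stores 65536
 *   echo 100     > /sys/kernel/debug/fault_around_bytes    stores 4096
 *   echo 4194304 > /sys/kernel/debug/fault_around_bytes    fails with -EINVAL
 */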
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) 		fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) static int __init fault_around_debugfs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) 	debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) 				   &fault_around_bytes_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) late_initcall(fault_around_debugfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245)  * do_fault_around() tries to map a few pages around the fault address. The hope
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246)  * is that the pages will be needed soon and this will lower the number of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247)  * faults to handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249)  * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250)  * not ready to be mapped: not up-to-date, locked, etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252)  * This function is called with the page table lock taken. In the split ptlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253)  * case the page table lock protects only those entries which belong to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254)  * the page table corresponding to the fault address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256)  * This function doesn't cross the VMA boundaries, in order to call map_pages()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257)  * only once.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259)  * fault_around_bytes defines how many bytes we'll try to map.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260)  * do_fault_around() expects it to be set to a power of two no larger than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261)  * PTRS_PER_PTE * PAGE_SIZE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263)  * The virtual address of the area that we map is naturally aligned to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264)  * fault_around_bytes rounded down to the machine page size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265)  * (and therefore to page order).  This way it's easier to guarantee
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266)  * that we don't cross page table boundaries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267)  */
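/*
 * Worked example, assuming 4 KiB pages and the default fault_around_bytes
 * of 65536: nr_pages is 16 and mask is ~0xffff, so a read fault at
 * 0x12345000 is widened to the 64 KiB-aligned window starting at
 * 0x12340000 and ->map_pages() is asked for the 16 pages covering
 * 0x12340000-0x1234ffff, clipped to the VMA and to the page table that
 * contains the faulting address.
 */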
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) static vm_fault_t do_fault_around(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) 	unsigned long address = vmf->address, nr_pages, mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) 	pgoff_t start_pgoff = vmf->pgoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) 	pgoff_t end_pgoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) 	int off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) 	nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) 	mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) 	address = max(address & mask, vmf->vma->vm_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) 	off = ((vmf->address - address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) 	start_pgoff -= off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) 	 *  end_pgoff is either the end of the page table, the end of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) 	 *  the vma or nr_pages from start_pgoff, depending on what is nearest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) 	end_pgoff = start_pgoff -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) 		((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) 		PTRS_PER_PTE - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) 	end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) 			start_pgoff + nr_pages - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) 	if (pmd_none(*vmf->pmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) 		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) 		if (!vmf->prealloc_pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) 			return VM_FAULT_OOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) 		smp_wmb(); /* See comment in __pte_alloc() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) 	return vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) static vm_fault_t do_read_fault(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) 	struct vm_area_struct *vma = vmf->vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) 	vm_fault_t ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) 	 * Let's call ->map_pages() first and use ->fault() as fallback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) 	 * if the page at the offset is not ready to be mapped (cold cache or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) 	 * something).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) 	if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) 		if (likely(!userfaultfd_minor(vmf->vma))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) 			ret = do_fault_around(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) 	ret = __do_fault(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) 	ret |= finish_fault(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) 	unlock_page(vmf->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) 		put_page(vmf->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) 
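/*
 * do_cow_fault() handles a write fault on a private file mapping: a
 * private page is allocated and charged up front, ->fault() brings in the
 * backing pagecache page, its contents are copied into the new page and
 * finish_fault() installs the private copy; the original pagecache page
 * is then unlocked and released.
 */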
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) static vm_fault_t do_cow_fault(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) 	struct vm_area_struct *vma = vmf->vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) 	vm_fault_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) 	if (unlikely(anon_vma_prepare(vma)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) 		return VM_FAULT_OOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) 	vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) 	if (!vmf->cow_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) 		return VM_FAULT_OOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) 	if (mem_cgroup_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) 		put_page(vmf->cow_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) 		return VM_FAULT_OOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) 	cgroup_throttle_swaprate(vmf->cow_page, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) 	ret = __do_fault(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) 		goto uncharge_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) 	if (ret & VM_FAULT_DONE_COW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) 	copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) 	__SetPageUptodate(vmf->cow_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) 	ret |= finish_fault(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) 	unlock_page(vmf->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) 	put_page(vmf->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) 		goto uncharge_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) uncharge_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) 	put_page(vmf->cow_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) 
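/*
 * do_shared_fault() handles a write fault on a shared file mapping: the
 * page is faulted in, the filesystem is notified through ->page_mkwrite()
 * if it provides one, and fault_dirty_shared_page() handles dirtying and
 * writeback balancing once finish_fault() has installed the PTE.
 */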
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) static vm_fault_t do_shared_fault(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) 	struct vm_area_struct *vma = vmf->vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) 	vm_fault_t ret, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) 	ret = __do_fault(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) 	 * Check if the backing address space wants to know that the page is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) 	 * about to become writable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) 	if (vma->vm_ops->page_mkwrite) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) 		unlock_page(vmf->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) 		tmp = do_page_mkwrite(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) 		if (unlikely(!tmp ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) 				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) 			put_page(vmf->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) 			return tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) 	ret |= finish_fault(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) 					VM_FAULT_RETRY))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) 		unlock_page(vmf->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) 		put_page(vmf->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) 	ret |= fault_dirty_shared_page(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405)  * We enter with non-exclusive mmap_lock (to exclude vma changes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406)  * but allow concurrent faults).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407)  * The mmap_lock may have been released depending on flags and our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408)  * return value.  See filemap_fault() and __lock_page_or_retry().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409)  * If mmap_lock is released, vma may become invalid (for example
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410)  * by other thread calling munmap()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) static vm_fault_t do_fault(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) 	struct vm_area_struct *vma = vmf->vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) 	struct mm_struct *vm_mm = vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) 	vm_fault_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) 	 * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) 	if (!vma->vm_ops->fault) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) 		 * If we find a migration pmd entry or a none pmd entry, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) 		 * should never happen, return SIGBUS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) 		if (unlikely(!pmd_present(*vmf->pmd)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) 			ret = VM_FAULT_SIGBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) 			vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) 						       vmf->pmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) 						       vmf->address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) 						       &vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) 			 * Make sure this is not a temporary clearing of the pte:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) 			 * hold the ptl and check again. An R/M/W update of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) 			 * pte takes the ptl and clears the pte, so that hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) 			 * cannot modify it concurrently, and only then writes the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) 			 * updated value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) 			if (unlikely(pte_none(*vmf->pte)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) 				ret = VM_FAULT_SIGBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) 				ret = VM_FAULT_NOPAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) 			pte_unmap_unlock(vmf->pte, vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) 	} else if (!(vmf->flags & FAULT_FLAG_WRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) 		ret = do_read_fault(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) 	else if (!(vmf->vma_flags & VM_SHARED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) 		ret = do_cow_fault(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) 		ret = do_shared_fault(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) 	/* preallocated pagetable is unused: free it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) 	if (vmf->prealloc_pte) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) 		pte_free(vm_mm, vmf->prealloc_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) 		vmf->prealloc_pte = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) 
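/*
 * numa_migrate_prep() takes a reference on the faulting page, accounts
 * the NUMA hinting fault and asks the memory policy code whether the
 * page is misplaced, returning the node to migrate it to or NUMA_NO_NODE
 * if it should stay put.
 */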
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) 				unsigned long addr, int page_nid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) 				int *flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) 	get_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) 	count_vm_numa_event(NUMA_HINT_FAULTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) 	if (page_nid == numa_node_id()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) 		*flags |= TNF_FAULT_LOCAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) 	return mpol_misplaced(page, vma, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) 
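/*
 * do_numa_page() services a NUMA hinting fault: the PTE was made
 * inaccessible by the NUMA balancing scanner, so restore the original
 * protections, decide whether the page would be better placed on the
 * faulting node and, if so, try to migrate it there; the result is
 * reported to the scheduler via task_numa_fault().
 */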
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) static vm_fault_t do_numa_page(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) 	struct vm_area_struct *vma = vmf->vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) 	struct page *page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) 	int page_nid = NUMA_NO_NODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) 	int last_cpupid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) 	int target_nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) 	bool migrated = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) 	pte_t pte, old_pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) 	bool was_writable = pte_savedwrite(vmf->orig_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) 	int flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) 	 * The "pte" at this point cannot be used safely without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) 	 * validation through pte_unmap_same(). It's of NUMA type but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) 	 * the pfn may be bogus if the read is not atomic.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) 	if (!pte_spinlock(vmf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) 		return VM_FAULT_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) 	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) 		pte_unmap_unlock(vmf->pte, vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) 	 * Make it present again. Depending on how the arch implements
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) 	 * non-accessible ptes, some can still allow access by kernel mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) 	old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) 	pte = pte_modify(old_pte, vmf->vma_page_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) 	pte = pte_mkyoung(pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) 	if (was_writable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) 		pte = pte_mkwrite(pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) 	ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) 	update_mmu_cache(vma, vmf->address, vmf->pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) 	page = _vm_normal_page(vma, vmf->address, pte, vmf->vma_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) 	if (!page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) 		pte_unmap_unlock(vmf->pte, vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) 	/* TODO: handle PTE-mapped THP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) 	if (PageCompound(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) 		pte_unmap_unlock(vmf->pte, vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) 	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) 	 * much anyway since they can be in shared cache state. This misses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) 	 * the case where a mapping is writable but the process never writes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) 	 * to it but pte_write gets cleared during protection updates and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) 	 * pte_dirty has unpredictable behaviour between PTE scan updates,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) 	 * background writeback, dirty balancing and application behaviour.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) 	if (!pte_write(pte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) 		flags |= TNF_NO_GROUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) 	 * Flag if the page is shared between multiple address spaces. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) 	 * is later used when determining whether to group tasks together
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) 	 * is later used when determining whether to group tasks together.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) 	if (page_mapcount(page) > 1 && (vmf->vma_flags & VM_SHARED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) 		flags |= TNF_SHARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) 	last_cpupid = page_cpupid_last(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) 	page_nid = page_to_nid(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) 	target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) 			&flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) 	pte_unmap_unlock(vmf->pte, vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) 	if (target_nid == NUMA_NO_NODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) 		put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) 	/* Migrate to the requested node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) 	migrated = migrate_misplaced_page(page, vmf, target_nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) 	if (migrated) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) 		page_nid = target_nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) 		flags |= TNF_MIGRATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) 		flags |= TNF_MIGRATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) 	if (page_nid != NUMA_NO_NODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) 		task_numa_fault(last_cpupid, page_nid, 1, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) 	if (vma_is_anonymous(vmf->vma))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) 		return do_huge_pmd_anonymous_page(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) 	if (vmf->vma->vm_ops->huge_fault)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) 		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) 	return VM_FAULT_FALLBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) /* `inline' is required to avoid gcc 4.1.2 build error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) 	if (vma_is_anonymous(vmf->vma)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) 		if (userfaultfd_huge_pmd_wp(vmf->vma, orig_pmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) 			return handle_userfault(vmf, VM_UFFD_WP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) 		return do_huge_pmd_wp_page(vmf, orig_pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) 	if (vmf->vma->vm_ops->huge_fault) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) 		vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) 		if (!(ret & VM_FAULT_FALLBACK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) 	/* COW or write-notify handled on pte level: split pmd. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) 	__split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) 	return VM_FAULT_FALLBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) static vm_fault_t create_huge_pud(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) #if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) 	/* No support for anonymous transparent PUD pages yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) 	if (vma_is_anonymous(vmf->vma))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) 		goto split;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) 	if (vmf->vma->vm_ops->huge_fault) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) 		vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) 		if (!(ret & VM_FAULT_FALLBACK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) split:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) 	/* COW or write-notify not handled on PUD level: split pud. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) 	__split_huge_pud(vmf->vma, vmf->pud, vmf->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) 	return VM_FAULT_FALLBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) 	/* No support for anonymous transparent PUD pages yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) 	if (vma_is_anonymous(vmf->vma))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) 		return VM_FAULT_FALLBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) 	if (vmf->vma->vm_ops->huge_fault)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) 		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) 	return VM_FAULT_FALLBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630)  * These routines also need to handle stuff like marking pages dirty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631)  * and/or accessed for architectures that don't do it in hardware (most
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632)  * RISC architectures).  The early dirtying is also good on the i386.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634)  * There is also a hook called "update_mmu_cache()" that architectures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635)  * with external mmu caches can use to update those (ie the Sparc or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636)  * PowerPC hashed page tables that act as extended TLBs).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638)  * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639)  * concurrent faults).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641)  * The mmap_lock may have been released depending on flags and our return value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642)  * See filemap_fault() and __lock_page_or_retry().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) 	pte_t entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) 	vm_fault_t ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) 	if (unlikely(pmd_none(*vmf->pmd))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) 		 * In the case of the speculative page fault handler we abort
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) 		 * the speculative path immediately as the pmd is probably
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) 		 * about to be converted into a huge one. We will try
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) 		 * again holding the mmap_sem (which implies that the collapse
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) 		 * operation is done).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) 		if (vmf->flags & FAULT_FLAG_SPECULATIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) 			return VM_FAULT_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) 		 * Leave __pte_alloc() until later: because vm_ops->fault may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) 		 * want to allocate huge page, and if we expose page table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) 		 * for an instant, it will be difficult to retract from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) 		 * concurrent faults and from rmap lookups.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) 		vmf->pte = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) 	} else if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) 		 * If a huge pmd materialized under us just retry later.  Use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) 		 * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) 		 * of pmd_trans_huge() to ensure the pmd didn't become
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) 		 * pmd_trans_huge under us and then back to pmd_none, as a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) 		 * result of MADV_DONTNEED running immediately after a huge pmd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) 		 * fault in a different thread of this mm, in turn leading to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) 		 * misleading pmd_trans_huge() retval. All we have to ensure is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) 		 * that it is a regular pmd that we can walk with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) 		 * pte_offset_map() and we can do that through an atomic read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) 		 * in C, which is what pmd_trans_unstable() provides.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) 		if (pmd_devmap_trans_unstable(vmf->pmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) 		 * A regular pmd is established and it can't morph into a huge
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) 		 * pmd from under us anymore at this point because we hold the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) 		 * mmap_lock read mode and khugepaged takes it in write mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) 		 * So now it's safe to run pte_offset_map().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) 		 * This is not applicable to the speculative page fault handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) 		 * but in that case, the pte is fetched earlier in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) 		 * handle_speculative_fault().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) 		vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) 		vmf->orig_pte = *vmf->pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) 		 * some architectures can have larger ptes than wordsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) 		 * e.g. ppc44x-defconfig has CONFIG_PTE_64BIT=y and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) 		 * CONFIG_32BIT=y, so READ_ONCE cannot guarantee atomic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) 		 * accesses.  The code below just needs a consistent view
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) 		 * for the ifs and we later double check anyway with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) 		 * ptl lock held. So here a barrier will do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) 		barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) 		if (pte_none(vmf->orig_pte)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) 			pte_unmap(vmf->pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) 			vmf->pte = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) 	if (!vmf->pte) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) 		if (vma_is_anonymous(vmf->vma))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) 			return do_anonymous_page(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) 		else if ((vmf->flags & FAULT_FLAG_SPECULATIVE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) 				!vmf_allows_speculation(vmf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) 			return VM_FAULT_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) 			return do_fault(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) 	if (!pte_present(vmf->orig_pte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) 		return do_swap_page(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) 	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) 		return do_numa_page(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) 	if (!pte_spinlock(vmf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) 		return VM_FAULT_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) 	entry = vmf->orig_pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) 	if (unlikely(!pte_same(*vmf->pte, entry))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) 		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) 		goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) 	if (vmf->flags & FAULT_FLAG_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) 		if (!pte_write(entry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) 			if (!(vmf->flags & FAULT_FLAG_SPECULATIVE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) 				return do_wp_page(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) 			if (!mmu_notifier_trylock(vmf->vma->vm_mm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) 				ret = VM_FAULT_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) 				goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) 			ret = do_wp_page(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) 			mmu_notifier_unlock(vmf->vma->vm_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) 		entry = pte_mkdirty(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) 	entry = pte_mkyoung(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) 	if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) 				vmf->flags & FAULT_FLAG_WRITE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) 		update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) 		/* Skip spurious TLB flush for retried page fault */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) 		if (vmf->flags & FAULT_FLAG_TRIED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) 			goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) 		if (vmf->flags & FAULT_FLAG_SPECULATIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) 			ret = VM_FAULT_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) 		 * This is needed only for protection faults but the arch code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) 		 * is not yet telling us if this is a protection fault or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) 		 * This still avoids useless tlb flushes for .text page faults
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) 		 * with threads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) 		if (vmf->flags & FAULT_FLAG_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) 			flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) 	trace_android_vh_handle_pte_fault_end(vmf, highest_memmap_pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) 	pte_unmap_unlock(vmf->pte, vmf->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) }
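
/*
 * Rough summary of the dispatch order in handle_pte_fault() above, written
 * as simplified pseudocode only (the real helpers all take a struct vm_fault
 * and involve the pte lock plus the speculative-fault special cases):
 *
 *	no pte yet       -> do_anonymous_page() or do_fault()
 *	!pte_present()   -> do_swap_page()
 *	pte_protnone()   -> do_numa_page() (NUMA hinting fault)
 *	otherwise        -> under the ptl: do_wp_page() for a write to a
 *	                    read-only pte, else mark the pte dirty/young
 */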
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773)  * By the time we get here, we already hold the mm semaphore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775)  * The mmap_lock may have been released depending on flags and our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776)  * return value.  See filemap_fault() and __lock_page_or_retry().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) 		unsigned long address, unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) 	struct vm_fault vmf = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) 		.vma = vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) 		.address = address & PAGE_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) 		.flags = flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) 		.pgoff = linear_page_index(vma, address),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) 		.gfp_mask = __get_fault_gfp_mask(vma),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) 		.vma_flags = vma->vm_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) 		.vma_page_prot = vma->vm_page_prot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) 	unsigned int dirty = flags & FAULT_FLAG_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) 	struct mm_struct *mm = vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) 	pgd_t *pgd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) 	p4d_t *p4d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) 	vm_fault_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) 	pgd = pgd_offset(mm, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) 	p4d = p4d_alloc(mm, pgd, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) 	if (!p4d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) 		return VM_FAULT_OOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) 	vmf.pud = pud_alloc(mm, p4d, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) 	if (!vmf.pud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) 		return VM_FAULT_OOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804) retry_pud:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) 	if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) 		ret = create_huge_pud(&vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) 		if (!(ret & VM_FAULT_FALLBACK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) 		pud_t orig_pud = *vmf.pud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) 		barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) 		if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) 			/* NUMA case for anonymous PUDs would go here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) 			if (dirty && !pud_write(orig_pud)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) 				ret = wp_huge_pud(&vmf, orig_pud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) 				if (!(ret & VM_FAULT_FALLBACK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) 					return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822) 				huge_pud_set_accessed(&vmf, orig_pud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) 				return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828) 	vmf.pmd = pmd_alloc(mm, vmf.pud, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) 	if (!vmf.pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830) 		return VM_FAULT_OOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) 	/* Huge pud page fault raced with pmd_alloc? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) 	if (pud_trans_unstable(vmf.pud))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) 		goto retry_pud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836) #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) 	vmf.sequence = raw_read_seqcount(&vma->vm_sequence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839) 	if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) 		ret = create_huge_pmd(&vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841) 		if (!(ret & VM_FAULT_FALLBACK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) 		pmd_t orig_pmd = *vmf.pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) 		barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) 		if (unlikely(is_swap_pmd(orig_pmd))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848) 			VM_BUG_ON(thp_migration_supported() &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849) 					  !is_pmd_migration_entry(orig_pmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) 			if (is_pmd_migration_entry(orig_pmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) 				pmd_migration_entry_wait(mm, vmf.pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) 		if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855) 			if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) 				return do_huge_pmd_numa_page(&vmf, orig_pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858) 			if (dirty && !pmd_write(orig_pmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859) 				ret = wp_huge_pmd(&vmf, orig_pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860) 				if (!(ret & VM_FAULT_FALLBACK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) 					return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) 				huge_pmd_set_accessed(&vmf, orig_pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) 				return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869) 	return handle_pte_fault(&vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) }
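
/*
 * The walk in __handle_mm_fault() above allocates each page-table level on
 * the way down and peels off huge mappings before the pte level is reached.
 * A condensed, illustrative outline (error paths and the THP/devmap details
 * omitted):
 *
 *	p4d_alloc() -> pud_alloc()
 *	    huge pud?  create_huge_pud() / wp_huge_pud() / set accessed
 *	pmd_alloc()
 *	    huge pmd?  create_huge_pmd() / do_huge_pmd_numa_page() /
 *	               wp_huge_pmd() / set accessed
 *	otherwise      handle_pte_fault()
 */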
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873)  * mm_account_fault - Do page fault accounting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875)  * @regs: the pt_regs struct pointer.  When set to NULL, will skip accounting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876)  *        of perf event counters, but we'll still do the per-task accounting to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877)  *        the task who triggered this page fault.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878)  * @address: the faulted address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879)  * @flags: the fault flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880)  * @ret: the fault retcode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882)  * This will take care of most of the page fault accounting.  Meanwhile, it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883)  * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884)  * updates.  However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885)  * still be in per-arch page fault handlers at the entry of page fault.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887) static inline void mm_account_fault(struct pt_regs *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) 				    unsigned long address, unsigned int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) 				    vm_fault_t ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) 	bool major;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894) 	 * We don't do accounting for some specific faults:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896) 	 * - Unsuccessful faults (e.g. when the address wasn't valid).  That
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897) 	 *   includes arch_vma_access_permitted() failing before reaching here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) 	 *   So this is not a "this many hardware page faults" counter.  We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899) 	 *   should use the hw profiling for that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901) 	 * - Incomplete faults (VM_FAULT_RETRY).  They will only be counted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902) 	 *   once they're completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) 	if (ret & (VM_FAULT_ERROR | VM_FAULT_RETRY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908) 	 * We define the fault as a major fault when the final successful fault
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909) 	 * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) 	 * handle it immediately previously).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912) 	major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914) 	if (major)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915) 		current->maj_flt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917) 		current->min_flt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920) 	 * If the fault is done for GUP, regs will be NULL.  We only do the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921) 	 * per-thread fault counter accounting for the task that triggered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922) 	 * the fault, and we skip the perf event updates.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924) 	if (!regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927) 	if (major)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928) 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930) 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931) }
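
/*
 * Example of the rule above: a fault that first returned VM_FAULT_RETRY and
 * then completed on the second attempt is accounted as a major fault, since
 * the retried attempt carries FAULT_FLAG_TRIED even when the final result is
 * not VM_FAULT_MAJOR.
 */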
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932) #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934) #ifndef CONFIG_ARCH_HAS_PTE_SPECIAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935) /* This is required by vm_normal_page() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) #error "Speculative page fault handler requires CONFIG_ARCH_HAS_PTE_SPECIAL"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939)  * vm_normal_page() adds some processing which should be done while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940)  * holding the mmap_sem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944)  * Tries to handle the page fault in a speculative way, without grabbing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4945)  * mmap_sem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946)  * When VM_FAULT_RETRY is returned, the vma pointer is valid and this vma must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947)  * be checked later when the mmap_sem has been grabbed by calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948)  * can_reuse_spf_vma().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4949)  * This is needed as the returned vma is kept in memory until the call to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4950)  * can_reuse_spf_vma() is made.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4951)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4952) static vm_fault_t ___handle_speculative_fault(struct mm_struct *mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4953) 				unsigned long address, unsigned int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4954) 				struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4956) 	struct vm_fault vmf = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957) 		.address = address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4958) 		.pgoff = linear_page_index(vma, address),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4959) 		.vma = vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4960) 		.gfp_mask = __get_fault_gfp_mask(vma),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4961) 		.flags = flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4962) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4963) #ifdef CONFIG_NUMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4964) 	struct mempolicy *pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4965) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4966) 	pgd_t *pgd, pgdval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4967) 	p4d_t *p4d, p4dval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4968) 	pud_t pudval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4969) 	int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4970) 	vm_fault_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4972) 	/* Clear flags that may lead to releasing the mmap_sem to retry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4973) 	flags &= ~(FAULT_FLAG_ALLOW_RETRY|FAULT_FLAG_KILLABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4974) 	flags |= FAULT_FLAG_SPECULATIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4976) 	/* rmb <-> seqlock, vma_rb_erase() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4977) 	seq = raw_read_seqcount(&vmf.vma->vm_sequence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4978) 	if (seq & 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4979) 		trace_spf_vma_changed(_RET_IP_, vmf.vma, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4980) 		return VM_FAULT_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4981) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4983) 	if (!vmf_allows_speculation(&vmf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4984) 		return VM_FAULT_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4986) 	vmf.vma_flags = READ_ONCE(vmf.vma->vm_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4987) 	vmf.vma_page_prot = READ_ONCE(vmf.vma->vm_page_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4989) 	/* Can't call userland page fault handler in the speculative path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4990) 	if (unlikely(vmf.vma_flags & VM_UFFD_MISSING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4991) 		trace_spf_vma_notsup(_RET_IP_, vmf.vma, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4992) 		return VM_FAULT_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4993) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4995) 	if (vmf.vma_flags & VM_GROWSDOWN || vmf.vma_flags & VM_GROWSUP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4996) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4997) 		 * This could be detected by checking the address against the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4998) 		 * VMA's boundaries, but we want to trace it as not supported
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4999) 		 * instead of changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5000) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5001) 		trace_spf_vma_notsup(_RET_IP_, vmf.vma, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5002) 		return VM_FAULT_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5003) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5005) 	if (address < READ_ONCE(vmf.vma->vm_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5006) 	    || READ_ONCE(vmf.vma->vm_end) <= address) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5007) 		trace_spf_vma_changed(_RET_IP_, vmf.vma, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5008) 		return VM_FAULT_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5009) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5011) 	if (!arch_vma_access_permitted(vmf.vma, flags & FAULT_FLAG_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5012) 				       flags & FAULT_FLAG_INSTRUCTION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5013) 				       flags & FAULT_FLAG_REMOTE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5014) 		goto out_segv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5016) 	/* This one is required to check that the VMA has write access set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5017) 	if (flags & FAULT_FLAG_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5018) 		if (unlikely(!(vmf.vma_flags & VM_WRITE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5019) 			goto out_segv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5020) 	} else if (unlikely(!(vmf.vma_flags & (VM_READ|VM_EXEC|VM_WRITE))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5021) 		goto out_segv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5023) #ifdef CONFIG_NUMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5024) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5025) 	 * MPOL_INTERLEAVE implies additional checks in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5026) 	 * mpol_misplaced() which are not compatible with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5027) 	 * speculative page fault processing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5028) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5029) 	pol = __get_vma_policy(vmf.vma, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5030) 	if (!pol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5031) 		pol = get_task_policy(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5033) 	if (pol && pol->mode == MPOL_INTERLEAVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5034) 		trace_spf_vma_notsup(_RET_IP_, vmf.vma, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5035) 		return VM_FAULT_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5036) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5037) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5039) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5040) 	 * Do a speculative lookup of the PTE entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5041) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5042) 	local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5043) 	pgd = pgd_offset(mm, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5044) 	pgdval = READ_ONCE(*pgd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5045) 	if (pgd_none(pgdval) || unlikely(pgd_bad(pgdval)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5046) 		goto out_walk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5048) 	p4d = p4d_offset(pgd, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5049) 	if (pgd_val(READ_ONCE(*pgd)) != pgd_val(pgdval))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5050) 		goto out_walk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5051) 	p4dval = READ_ONCE(*p4d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5052) 	if (p4d_none(p4dval) || unlikely(p4d_bad(p4dval)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5053) 		goto out_walk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5055) 	vmf.pud = pud_offset(p4d, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5056) 	if (p4d_val(READ_ONCE(*p4d)) != p4d_val(p4dval))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5057) 		goto out_walk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5058) 	pudval = READ_ONCE(*vmf.pud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5059) 	if (pud_none(pudval) || unlikely(pud_bad(pudval)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5060) 		goto out_walk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5062) 	/* Huge pages at PUD level are not supported. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5063) 	if (unlikely(pud_trans_huge(pudval)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5064) 		goto out_walk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5066) 	vmf.pmd = pmd_offset(vmf.pud, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5067) 	if (pud_val(READ_ONCE(*vmf.pud)) != pud_val(pudval))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5068) 		goto out_walk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5069) 	vmf.orig_pmd = READ_ONCE(*vmf.pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5070) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5071) 	 * pmd_none could mean that a hugepage collapse is in progress
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5072) 	 * behind our back, as collapse_huge_page() marks it before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5073) 	 * invalidating the pte (which is done once the IPI is caught
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5074) 	 * by all CPUs and we have interrupts disabled).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5075) 	 * For this reason we cannot handle THP in a speculative way since we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5076) 	 * can't safely identify an in-progress collapse operation done behind
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5077) 	 * our back on that PMD.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5078) 	 * Regarding the order of the following checks, see comment in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5079) 	 * pmd_devmap_trans_unstable()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5080) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5081) 	if (unlikely(pmd_devmap(vmf.orig_pmd) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5082) 		     pmd_none(vmf.orig_pmd) || pmd_trans_huge(vmf.orig_pmd) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5083) 		     is_swap_pmd(vmf.orig_pmd)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5084) 		goto out_walk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5086) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5087) 	 * The above does not allocate/instantiate page-tables because doing so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5088) 	 * would lead to the possibility of instantiating page-tables after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5089) 	 * free_pgtables() -- and consequently leaking them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5090) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5091) 	 * The result is that we take at least one !speculative fault per PMD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5092) 	 * in order to instantiate it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5093) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5095) 	vmf.pte = pte_offset_map(vmf.pmd, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5096) 	if (pmd_val(READ_ONCE(*vmf.pmd)) != pmd_val(vmf.orig_pmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5097) 		pte_unmap(vmf.pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5098) 		vmf.pte = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5099) 		goto out_walk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5100) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5101) 	vmf.orig_pte = READ_ONCE(*vmf.pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5102) 	barrier(); /* See comment in handle_pte_fault() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5103) 	if (pte_none(vmf.orig_pte)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5104) 		pte_unmap(vmf.pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5105) 		vmf.pte = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5106) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5108) 	vmf.sequence = seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5109) 	vmf.flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5111) 	local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5113) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5114) 	 * We need to re-validate the VMA after checking the bounds, otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5115) 	 * we might have a false positive on the bounds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5116) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5117) 	if (read_seqcount_retry(&vmf.vma->vm_sequence, seq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5118) 		trace_spf_vma_changed(_RET_IP_, vmf.vma, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5119) 		return VM_FAULT_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5120) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5122) 	mem_cgroup_enter_user_fault();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5123) 	ret = handle_pte_fault(&vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5124) 	mem_cgroup_exit_user_fault();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5126) 	if (ret != VM_FAULT_RETRY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5127) 		if (vma_is_anonymous(vmf.vma))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5128) 			count_vm_event(SPECULATIVE_PGFAULT_ANON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5129) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5130) 			count_vm_event(SPECULATIVE_PGFAULT_FILE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5131) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5133) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5134) 	 * The task may have entered a memcg OOM situation but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135) 	 * if the allocation error was handled gracefully (no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5136) 	 * VM_FAULT_OOM), there is no need to kill anything.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5137) 	 * Just clean up the OOM state peacefully.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5138) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5139) 	if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5140) 		mem_cgroup_oom_synchronize(false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5141) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5143) out_walk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5144) 	trace_spf_vma_notsup(_RET_IP_, vmf.vma, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5145) 	local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5146) 	return VM_FAULT_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5148) out_segv:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5149) 	trace_spf_vma_access(_RET_IP_, vmf.vma, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5150) 	return VM_FAULT_SIGSEGV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5151) }
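
/*
 * Informal recap of the protocol above: the VMA's vm_sequence is sampled
 * first, the flag/bound/access checks use values read with READ_ONCE(), the
 * page-table walk runs with IRQs disabled (so, as noted above, a concurrent
 * collapse cannot finish its IPI-based pte invalidation under us), and the
 * sequence count is re-checked before the fault is actually handled. Any
 * mismatch bails out with VM_FAULT_RETRY so the regular, mmap_lock-protected
 * path can take over.
 */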
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5153) vm_fault_t __handle_speculative_fault(struct mm_struct *mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5154) 				unsigned long address, unsigned int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5155) 				struct vm_area_struct **vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5156) 				struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5158) 	vm_fault_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5160) 	check_sync_rss_stat(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5162) 	*vma = get_vma(mm, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5163) 	if (!*vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5164) 		return VM_FAULT_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5166) 	ret = ___handle_speculative_fault(mm, address, flags, *vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5168) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5169) 	 * If there is no need to retry, don't return the vma to the caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5170) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5171) 	if (ret != VM_FAULT_RETRY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5172) 		put_vma(*vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5173) 		*vma = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5174) 		mm_account_fault(regs, address, flags, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5175) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5177) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5180) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5181)  * This is used to know if the vma fetched in the speculative page fault handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5182)  * is still valid when trying the regular fault path while holding the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5183)  * mmap_sem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5184)  * The call to put_vma(vma) must be made after checking the vma's fields, as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5185)  * the vma may be freed by put_vma(). In such a case it is expected that false
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5186)  * is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5187)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5188) bool can_reuse_spf_vma(struct vm_area_struct *vma, unsigned long address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5189) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5190) 	bool ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5192) 	ret = !RB_EMPTY_NODE(&vma->vm_rb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5193) 		vma->vm_start <= address && address < vma->vm_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5194) 	put_vma(vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5195) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5196) }
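
/*
 * Typical caller-side flow for the speculative path, sketched here only as
 * an illustration (the exact code lives in the per-arch fault handlers and
 * differs in detail):
 *
 *	fault = __handle_speculative_fault(mm, addr, flags, &vma, regs);
 *	if (fault == VM_FAULT_RETRY) {
 *		mmap_read_lock(mm);
 *		if (!vma || !can_reuse_spf_vma(vma, addr))
 *			vma = find_vma(mm, addr);
 *		fault = handle_mm_fault(vma, addr, flags, regs);
 *	}
 */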
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5197) #endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5199) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5200)  * By the time we get here, we already hold the mm semaphore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5201)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5202)  * The mmap_lock may have been released depending on flags and our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5203)  * return value.  See filemap_fault() and __lock_page_or_retry().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5204)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5205) vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5206) 			   unsigned int flags, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5208) 	vm_fault_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5210) 	__set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5212) 	count_vm_event(PGFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5213) 	count_memcg_event_mm(vma->vm_mm, PGFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5215) 	/* do counter updates before entering the really critical section. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5216) 	check_sync_rss_stat(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5218) 	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5219) 					    flags & FAULT_FLAG_INSTRUCTION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5220) 					    flags & FAULT_FLAG_REMOTE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5221) 		return VM_FAULT_SIGSEGV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5223) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5224) 	 * Enable the memcg OOM handling for faults triggered in user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5225) 	 * space.  Kernel faults are handled more gracefully.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5226) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5227) 	if (flags & FAULT_FLAG_USER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5228) 		mem_cgroup_enter_user_fault();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5230) 	if (unlikely(is_vm_hugetlb_page(vma)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5231) 		ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5232) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5233) 		ret = __handle_mm_fault(vma, address, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5235) 	if (flags & FAULT_FLAG_USER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5236) 		mem_cgroup_exit_user_fault();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5237) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5238) 		 * The task may have entered a memcg OOM situation but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5239) 		 * if the allocation error was handled gracefully (no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5240) 		 * VM_FAULT_OOM), there is no need to kill anything.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5241) 		 * Just clean up the OOM state peacefully.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5242) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5243) 		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5244) 			mem_cgroup_oom_synchronize(false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5245) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5247) 	mm_account_fault(regs, address, flags, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5249) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5251) EXPORT_SYMBOL_GPL(handle_mm_fault);
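
/*
 * For reference, a condensed, illustrative view of how a per-arch fault
 * handler typically drives handle_mm_fault() (signal handling and error
 * reporting omitted; the structure below is shorthand, not any specific
 * architecture's code):
 *
 *	mmap_read_lock(mm);
 * retry:
 *	vma = find_vma(mm, addr);
 *	fault = handle_mm_fault(vma, addr, flags, regs);
 *	if ((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY)) {
 *		flags |= FAULT_FLAG_TRIED;
 *		mmap_read_lock(mm);	(it was released on VM_FAULT_RETRY)
 *		goto retry;
 *	}
 *	mmap_read_unlock(mm);
 */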
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5253) #ifndef __PAGETABLE_P4D_FOLDED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5254) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5255)  * Allocate p4d page table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5256)  * We've already handled the fast-path in-line.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5257)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5258) int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5260) 	p4d_t *new = p4d_alloc_one(mm, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5261) 	if (!new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5262) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5264) 	smp_wmb(); /* See comment in __pte_alloc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5266) 	spin_lock(&mm->page_table_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5267) 	if (pgd_present(*pgd))		/* Another has populated it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5268) 		p4d_free(mm, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5269) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5270) 		pgd_populate(mm, pgd, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5271) 	spin_unlock(&mm->page_table_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5272) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5274) #endif /* __PAGETABLE_P4D_FOLDED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5276) #ifndef __PAGETABLE_PUD_FOLDED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5277) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5278)  * Allocate page upper directory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5279)  * We've already handled the fast-path in-line.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5280)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5281) int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5283) 	pud_t *new = pud_alloc_one(mm, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5284) 	if (!new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5285) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5287) 	smp_wmb(); /* See comment in __pte_alloc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5289) 	spin_lock(&mm->page_table_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5290) 	if (!p4d_present(*p4d)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5291) 		mm_inc_nr_puds(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5292) 		p4d_populate(mm, p4d, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5293) 	} else	/* Another has populated it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5294) 		pud_free(mm, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5295) 	spin_unlock(&mm->page_table_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5296) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5298) #endif /* __PAGETABLE_PUD_FOLDED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5300) #ifndef __PAGETABLE_PMD_FOLDED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5301) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5302)  * Allocate page middle directory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5303)  * We've already handled the fast-path in-line.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5304)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5305) int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5306) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5307) 	spinlock_t *ptl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5308) 	pmd_t *new = pmd_alloc_one(mm, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5309) 	if (!new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5310) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5312) 	smp_wmb(); /* See comment in __pte_alloc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5314) 	ptl = pud_lock(mm, pud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5315) 	if (!pud_present(*pud)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5316) 		mm_inc_nr_pmds(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5317) 		pud_populate(mm, pud, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5318) 	} else	/* Another has populated it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5319) 		pmd_free(mm, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5320) 	spin_unlock(ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5321) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5323) #endif /* __PAGETABLE_PMD_FOLDED */
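
/*
 * The three slow-path allocators above share one pattern: allocate the new
 * table with no locks held, publish it under the page-table lock, and throw
 * it away if another thread won the race.  A minimal sketch of that shape,
 * using hypothetical example_*() names (not kernel API), for illustration:
 *
 *	int example_alloc_slot(struct mm_struct *mm, void **slot)
 *	{
 *		void *new = example_alloc();	// may sleep, no lock held
 *
 *		if (!new)
 *			return -ENOMEM;
 *		smp_wmb();			// table init visible before publish
 *		spin_lock(&mm->page_table_lock);
 *		if (*slot)			// another thread populated it
 *			example_free(new);
 *		else
 *			*slot = new;
 *		spin_unlock(&mm->page_table_lock);
 *		return 0;
 *	}
 */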
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5324) 
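/*
 * Page-table walker backing follow_pte() and follow_pfn(): look up @address
 * in @mm and return 0 with the PTE mapped and locked (via @ptepp and @ptlp),
 * or, when @pmdpp is non-NULL and the entry is a huge PMD, with the PMD
 * locked instead (via @pmdpp and @ptlp).  When @range is non-NULL it is
 * initialised and mmu_notifier_invalidate_range_start() has been called by
 * the time this returns 0; the caller is then responsible for dropping the
 * lock and calling mmu_notifier_invalidate_range_end().  Returns -EINVAL
 * when no suitable mapping is found.
 */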
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5325) int follow_invalidate_pte(struct mm_struct *mm, unsigned long address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5326) 			  struct mmu_notifier_range *range, pte_t **ptepp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5327) 			  pmd_t **pmdpp, spinlock_t **ptlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5329) 	pgd_t *pgd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5330) 	p4d_t *p4d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5331) 	pud_t *pud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5332) 	pmd_t *pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5333) 	pte_t *ptep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5335) 	pgd = pgd_offset(mm, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5336) 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5337) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5339) 	p4d = p4d_offset(pgd, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5340) 	if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5341) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5343) 	pud = pud_offset(p4d, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5344) 	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5345) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5347) 	pmd = pmd_offset(pud, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5348) 	VM_BUG_ON(pmd_trans_huge(*pmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5350) 	if (pmd_huge(*pmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5351) 		if (!pmdpp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5352) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5354) 		if (range) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5355) 			mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5356) 						NULL, mm, address & PMD_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5357) 						(address & PMD_MASK) + PMD_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5358) 			mmu_notifier_invalidate_range_start(range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5359) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5360) 		*ptlp = pmd_lock(mm, pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5361) 		if (pmd_huge(*pmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5362) 			*pmdpp = pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5363) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5364) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5365) 		spin_unlock(*ptlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5366) 		if (range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5367) 			mmu_notifier_invalidate_range_end(range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5368) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5370) 	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5371) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5373) 	if (range) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5374) 		mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5375) 					address & PAGE_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5376) 					(address & PAGE_MASK) + PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5377) 		mmu_notifier_invalidate_range_start(range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5378) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5379) 	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5380) 	if (!pte_present(*ptep))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5381) 		goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5382) 	*ptepp = ptep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5383) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5384) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5385) 	pte_unmap_unlock(ptep, *ptlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5386) 	if (range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5387) 		mmu_notifier_invalidate_range_end(range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5388) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5389) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5392) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5393)  * follow_pte - look up PTE at a user virtual address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5394)  * @mm: the mm_struct of the target address space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5395)  * @address: user virtual address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5396)  * @ptepp: location to store found PTE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5397)  * @ptlp: location to store the lock for the PTE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5398)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5399)  * On a successful return, the pointer to the PTE is stored in @ptepp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5400)  * the corresponding lock is taken and its location is stored in @ptlp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5401)  * The contents of the PTE are only stable until @ptlp is released;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5402)  * any further use, if any, must be protected against invalidation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5403)  * with MMU notifiers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5404)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5405)  * Only IO mappings and raw PFN mappings are allowed.  The mmap semaphore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5406)  * should be taken for read.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5407)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5408)  * KVM uses this function.  While it is arguably less bad than ``follow_pfn``,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5409)  * it is not a good general-purpose API.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5410)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5411)  * Return: zero on success, a negative error code otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5412)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5413) int follow_pte(struct mm_struct *mm, unsigned long address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5414) 	       pte_t **ptepp, spinlock_t **ptlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5416) 	return follow_invalidate_pte(mm, address, NULL, ptepp, NULL, ptlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5418) EXPORT_SYMBOL_GPL(follow_pte);
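
/*
 * A minimal caller sketch for follow_pte() (hypothetical helper, for
 * illustration only): the mmap lock is held for read across the lookup and
 * the PTE is dereferenced only while the returned spinlock is held, as the
 * comment above requires.  A real caller would also check that the VMA is a
 * VM_IO/VM_PFNMAP mapping first, as follow_pfn() below does.
 *
 *	static int example_read_pfn(struct mm_struct *mm, unsigned long addr,
 *				    unsigned long *pfn)
 *	{
 *		spinlock_t *ptl;
 *		pte_t *ptep;
 *		int ret;
 *
 *		mmap_read_lock(mm);
 *		ret = follow_pte(mm, addr, &ptep, &ptl);
 *		if (!ret) {
 *			*pfn = pte_pfn(*ptep);	// stable only while ptl is held
 *			pte_unmap_unlock(ptep, ptl);
 *		}
 *		mmap_read_unlock(mm);
 *		return ret;
 *	}
 */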
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5420) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5421)  * follow_pfn - look up PFN at a user virtual address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5422)  * @vma: memory mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5423)  * @address: user virtual address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5424)  * @pfn: location to store found PFN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5425)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5426)  * Only IO mappings and raw PFN mappings are allowed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5427)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5428)  * This function does not allow the caller to read the permissions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5429)  * of the PTE.  Do not use it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5430)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5431)  * Return: zero and the PFN at @pfn on success, a negative error code otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5432)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5433) int follow_pfn(struct vm_area_struct *vma, unsigned long address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5434) 	unsigned long *pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5435) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5436) 	int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5437) 	spinlock_t *ptl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5438) 	pte_t *ptep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5440) 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5441) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5443) 	ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5444) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5445) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5446) 	*pfn = pte_pfn(*ptep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5447) 	pte_unmap_unlock(ptep, ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5448) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5450) EXPORT_SYMBOL(follow_pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5452) #ifdef CONFIG_HAVE_IOREMAP_PROT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5453) int follow_phys(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5454) 		unsigned long address, unsigned int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5455) 		unsigned long *prot, resource_size_t *phys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5457) 	int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5458) 	pte_t *ptep, pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5459) 	spinlock_t *ptl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5461) 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5462) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5464) 	if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5465) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5466) 	pte = *ptep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5468) 	if ((flags & FOLL_WRITE) && !pte_write(pte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5469) 		goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5471) 	*prot = pgprot_val(pte_pgprot(pte));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5472) 	*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5474) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5475) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5476) 	pte_unmap_unlock(ptep, ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5477) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5478) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5481) int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5482) 			void *buf, int len, int write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5483) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5484) 	resource_size_t phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5485) 	unsigned long prot = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5486) 	void __iomem *maddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5487) 	int offset = addr & (PAGE_SIZE-1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5489) 	if (follow_phys(vma, addr, write, &prot, &phys_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5490) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5492) 	maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5493) 	if (!maddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5494) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5496) 	if (write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5497) 		memcpy_toio(maddr + offset, buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5498) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5499) 		memcpy_fromio(buf, maddr + offset, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5500) 	iounmap(maddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5502) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5504) EXPORT_SYMBOL_GPL(generic_access_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5505) #endif
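
/*
 * generic_access_phys() is meant to be wired up as the ->access callback of
 * a VM_IO/VM_PFNMAP mapping so that access_remote_vm() (and thus ptrace) can
 * still reach the memory.  Sketch with hypothetical example_* names,
 * modelled on how drivers/char/mem.c uses it:
 *
 *	static const struct vm_operations_struct example_phys_vm_ops = {
 *	#ifdef CONFIG_HAVE_IOREMAP_PROT
 *		.access = generic_access_phys,
 *	#endif
 *	};
 *
 *	static int example_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vma->vm_ops = &example_phys_vm_ops;
 *		return remap_pfn_range(vma, vma->vm_start, example_pfn,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 */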
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5507) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5508)  * Access another process' address space as given in mm.  If non-NULL, use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5509)  * given task for page fault accounting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5510)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5511) int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5512) 		unsigned long addr, void *buf, int len, unsigned int gup_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5514) 	struct vm_area_struct *vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5515) 	void *old_buf = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5516) 	int write = gup_flags & FOLL_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5518) 	if (mmap_read_lock_killable(mm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5519) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5521) 	/* ignore errors, just check how much was successfully transferred */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5522) 	while (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5523) 		int bytes, ret, offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5524) 		void *maddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5525) 		struct page *page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5527) 		ret = get_user_pages_remote(mm, addr, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5528) 				gup_flags, &page, &vma, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5529) 		if (ret <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5530) #ifndef CONFIG_HAVE_IOREMAP_PROT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5531) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5532) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5533) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5534) 			 * Check if this is a VM_IO | VM_PFNMAP VMA, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5535) 			 * we can access using slightly different code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5536) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5537) 			vma = find_vma(mm, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5538) 			if (!vma || vma->vm_start > addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5539) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5540) 			if (vma->vm_ops && vma->vm_ops->access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5541) 				ret = vma->vm_ops->access(vma, addr, buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5542) 							  len, write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5543) 			if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5544) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5545) 			bytes = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5546) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5547) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5548) 			bytes = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5549) 			offset = addr & (PAGE_SIZE-1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5550) 			if (bytes > PAGE_SIZE-offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5551) 				bytes = PAGE_SIZE-offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5553) 			maddr = kmap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5554) 			if (write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5555) 				copy_to_user_page(vma, page, addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5556) 						  maddr + offset, buf, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5557) 				set_page_dirty_lock(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5558) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5559) 				copy_from_user_page(vma, page, addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5560) 						    buf, maddr + offset, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5561) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5562) 			kunmap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5563) 			put_user_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5564) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5565) 		len -= bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5566) 		buf += bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5567) 		addr += bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5568) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5569) 	mmap_read_unlock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5571) 	return buf - old_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5574) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5575)  * access_remote_vm - access another process' address space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5576)  * @mm:		the mm_struct of the target address space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5577)  * @addr:	start address to access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5578)  * @buf:	source or destination buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5579)  * @len:	number of bytes to transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5580)  * @gup_flags:	flags modifying lookup behaviour
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5581)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5582)  * The caller must hold a reference on @mm.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5583)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5584)  * Return: number of bytes copied from source to destination.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5585)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5586) int access_remote_vm(struct mm_struct *mm, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5587) 		void *buf, int len, unsigned int gup_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5588) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5589) 	return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5592) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5593)  * Access another process' address space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5594)  * The source/target buffer must be in kernel space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5595)  * Do not walk the page tables directly; use get_user_pages().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5596)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5597) int access_process_vm(struct task_struct *tsk, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5598) 		void *buf, int len, unsigned int gup_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5599) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5600) 	struct mm_struct *mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5601) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5603) 	mm = get_task_mm(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5604) 	if (!mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5605) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5607) 	ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5609) 	mmput(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5611) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5613) EXPORT_SYMBOL_GPL(access_process_vm);
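
/*
 * Hedged usage sketch (hypothetical helper): a ptrace-style peek of another
 * task's memory.  FOLL_FORCE mirrors what ptrace() itself passes; the return
 * value of access_process_vm() is the number of bytes actually copied, which
 * may be short.
 *
 *	static int example_peek(struct task_struct *tsk, unsigned long addr,
 *				void *kbuf, int len)
 *	{
 *		int copied = access_process_vm(tsk, addr, kbuf, len, FOLL_FORCE);
 *
 *		return (copied == len) ? 0 : -EIO;
 *	}
 */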
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5615) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5616)  * Print the name of a VMA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5617)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5618) void print_vma_addr(char *prefix, unsigned long ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5619) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5620) 	struct mm_struct *mm = current->mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5621) 	struct vm_area_struct *vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5623) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5624) 	 * We might be running from an atomic context, so we cannot sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5625) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5626) 	if (!mmap_read_trylock(mm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5627) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5629) 	vma = find_vma(mm, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5630) 	if (vma && vma->vm_file) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5631) 		struct file *f = vma->vm_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5632) 		char *buf = (char *)__get_free_page(GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5633) 		if (buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5634) 			char *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5636) 			p = file_path(f, buf, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5637) 			if (IS_ERR(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5638) 				p = "?";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5639) 			printk("%s%s[%lx+%lx]", prefix, kbasename(p),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5640) 					vma->vm_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5641) 					vma->vm_end - vma->vm_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5642) 			free_page((unsigned long)buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5643) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5644) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5645) 	mmap_read_unlock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5646) }
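
/*
 * Typical caller: an architecture's "unhandled signal" path, which prints
 * the faulting task and then appends the VMA name on the same output line,
 * e.g. (sketch modelled on arch/x86/mm/fault.c):
 *
 *	print_vma_addr(KERN_CONT " in ", regs->ip);
 *	pr_cont("\n");
 */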
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5648) #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5649) void __might_fault(const char *file, int line)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5651) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5652) 	 * Some code (nfs/sunrpc) uses socket ops on kernel memory while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5653) 	 * holding the mmap_lock, this is safe because kernel memory doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5654) 	 * get paged out, therefore we'll never actually fault, and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5655) 	 * below annotations will generate false positives.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5656) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5657) 	if (uaccess_kernel())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5658) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5659) 	if (pagefault_disabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5660) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5661) 	__might_sleep(file, line, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5662) #if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5663) 	if (current->mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5664) 		might_lock_read(&current->mm->mmap_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5665) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5667) EXPORT_SYMBOL(__might_fault);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5668) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5670) #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5671) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5672)  * Process all subpages of the specified huge page with the specified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5673)  * operation.  The target subpage will be processed last to keep its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5674)  * cache lines hot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5675)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5676) static inline void process_huge_page(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5677) 	unsigned long addr_hint, unsigned int pages_per_huge_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5678) 	void (*process_subpage)(unsigned long addr, int idx, void *arg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5679) 	void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5680) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5681) 	int i, n, base, l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5682) 	unsigned long addr = addr_hint &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5683) 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5685) 	/* Process target subpage last to keep its cache lines hot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5686) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5687) 	n = (addr_hint - addr) / PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5688) 	if (2 * n <= pages_per_huge_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5689) 		/* If the target subpage is in the first half of the huge page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5690) 		base = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5691) 		l = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5692) 		/* Process subpages at the end of huge page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5693) 		for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5694) 			cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5695) 			process_subpage(addr + i * PAGE_SIZE, i, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5696) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5697) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5698) 		/* If the target subpage is in the second half of the huge page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5699) 		base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5700) 		l = pages_per_huge_page - n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5701) 		/* Process subpages at the beginning of the huge page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5702) 		for (i = 0; i < base; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5703) 			cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5704) 			process_subpage(addr + i * PAGE_SIZE, i, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5705) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5706) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5707) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5708) 	 * Process remaining subpages in left-right-left-right pattern
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5709) 	 * towards the target subpage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5710) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5711) 	for (i = 0; i < l; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5712) 		int left_idx = base + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5713) 		int right_idx = base + 2 * l - 1 - i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5715) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5716) 		process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5717) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5718) 		process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5719) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5720) }
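
/*
 * Worked example of the ordering above (illustrative): with
 * pages_per_huge_page = 8 and the target hint in subpage n = 2 (first half,
 * so base = 0, l = 2), the subpages are touched in the order 7, 6, 5, 4
 * (the tail pages first), then 0, 3, 1, 2, converging on the target from
 * both sides so that subpage 2 is written last and its cache lines stay hot
 * for the faulting access.
 */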
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5722) static void clear_gigantic_page(struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5723) 				unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5724) 				unsigned int pages_per_huge_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5726) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5727) 	struct page *p = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5729) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5730) 	for (i = 0; i < pages_per_huge_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5731) 	     i++, p = mem_map_next(p, page, i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5732) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5733) 		clear_user_highpage(p, addr + i * PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5734) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5737) static void clear_subpage(unsigned long addr, int idx, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5738) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5739) 	struct page *page = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5741) 	clear_user_highpage(page + idx, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5744) void clear_huge_page(struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5745) 		     unsigned long addr_hint, unsigned int pages_per_huge_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5746) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5747) 	unsigned long addr = addr_hint &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5748) 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5750) 	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5751) 		clear_gigantic_page(page, addr, pages_per_huge_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5752) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5753) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5755) 	process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5758) static void copy_user_gigantic_page(struct page *dst, struct page *src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5759) 				    unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5760) 				    struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5761) 				    unsigned int pages_per_huge_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5763) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5764) 	struct page *dst_base = dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5765) 	struct page *src_base = src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5767) 	for (i = 0; i < pages_per_huge_page; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5768) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5769) 		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5771) 		i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5772) 		dst = mem_map_next(dst, dst_base, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5773) 		src = mem_map_next(src, src_base, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5774) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5777) struct copy_subpage_arg {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5778) 	struct page *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5779) 	struct page *src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5780) 	struct vm_area_struct *vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5781) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5783) static void copy_subpage(unsigned long addr, int idx, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5784) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5785) 	struct copy_subpage_arg *copy_arg = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5787) 	copy_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5788) 			   addr, copy_arg->vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5791) void copy_user_huge_page(struct page *dst, struct page *src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5792) 			 unsigned long addr_hint, struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5793) 			 unsigned int pages_per_huge_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5795) 	unsigned long addr = addr_hint &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5796) 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5797) 	struct copy_subpage_arg arg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5798) 		.dst = dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5799) 		.src = src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5800) 		.vma = vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5801) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5803) 	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5804) 		copy_user_gigantic_page(dst, src, addr, vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5805) 					pages_per_huge_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5806) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5807) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5809) 	process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5812) long copy_huge_page_from_user(struct page *dst_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5813) 				const void __user *usr_src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5814) 				unsigned int pages_per_huge_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5815) 				bool allow_pagefault)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5817) 	void *src = (void *)usr_src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5818) 	void *page_kaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5819) 	unsigned long i, rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5820) 	unsigned long ret_val = pages_per_huge_page * PAGE_SIZE; /* returned: bytes not copied */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5821) 	struct page *subpage = dst_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5823) 	for (i = 0; i < pages_per_huge_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5824) 	     i++, subpage = mem_map_next(subpage, dst_page, i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5825) 		if (allow_pagefault)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5826) 			page_kaddr = kmap(subpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5827) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5828) 			page_kaddr = kmap_atomic(subpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5829) 		rc = copy_from_user(page_kaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5830) 				(const void __user *)(src + i * PAGE_SIZE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5831) 				PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5832) 		if (allow_pagefault)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5833) 			kunmap(subpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5834) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5835) 			kunmap_atomic(page_kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5837) 		ret_val -= (PAGE_SIZE - rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5838) 		if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5839) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5841) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5842) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5843) 	return ret_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5845) #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5847) #if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5849) static struct kmem_cache *page_ptl_cachep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5851) void __init ptlock_cache_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5853) 	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5854) 			SLAB_PANIC, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5857) bool ptlock_alloc(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5859) 	spinlock_t *ptl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5861) 	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5862) 	if (!ptl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5863) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5864) 	page->ptl = ptl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5865) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5868) void ptlock_free(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5869) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5870) 	kmem_cache_free(page_ptl_cachep, page->ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5871) }
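
/*
 * Note: this split-ptlock allocation path is only compiled in when the
 * spinlock cannot be embedded directly in struct page (ALLOC_SPLIT_PTLOCKS,
 * i.e. when sizeof(spinlock_t) exceeds a machine word, typically with
 * lockdep or spinlock debugging enabled); otherwise ptlock_alloc() and
 * ptlock_free() are trivial inline stubs provided by the header.
 */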
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5872) #endif