Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0-or-later */
/* include/asm-generic/tlb.h
 *
 *	Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/hugetlb_inline.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * Blindly accessing user memory from NMI context can be dangerous
 * if we're in the middle of switching the current user task or switching
 * the loaded mm.
 */
#ifndef nmi_uaccess_okay
# define nmi_uaccess_okay() true
#endif
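
/*
 * Illustration (not part of this header): an architecture for which the
 * default is unsafe supplies its own definition, typically via the
 * asm/tlbflush.h included above. A sketch along the lines of what x86
 * checks:
 *
 *	#define nmi_uaccess_okay()	\
 *		(this_cpu_read(cpu_tlbstate.loaded_mm) == current->mm)
 *
 * i.e. only touch user memory from NMI context when the loaded mm really
 * is the current task's mm.
 */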

#ifdef CONFIG_MMU

/*
 * Generic MMU-gather implementation.
 *
 * The mmu_gather data structure is used by the mm code to implement the
 * correct and efficient ordering of freeing pages and TLB invalidations.
 *
 * This correct ordering is:
 *
 *  1) unhook page
 *  2) TLB invalidate page
 *  3) free page
 *
 * That is, we must never free a page before we have ensured there are no live
 * translations left to it. Otherwise it might be possible to observe (or
 * worse, change) the page content after it has been reused.
 *
 * The mmu_gather API consists of:
 *
 *  - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather
 *
 *    Finish in particular will issue a (final) TLB invalidate and free
 *    all (remaining) queued pages.
 *
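 *    A minimal usage sketch (illustration only; the real call sites live
 *    in mm/memory.c and mm/mmap.c), using this kernel's ranged signatures:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	...				// zap ptes, queue pages
 *	tlb_finish_mmu(&tlb, start, end);
 *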
 *  - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
 *
 *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
 *    there are large holes between the VMAs.
 *
 *  - tlb_remove_table()
 *
 *    tlb_remove_table() is the basic primitive to free page-table directories
 *    (__p*_free_tlb()).  In its most primitive form it is an alias for
 *    tlb_remove_page() below, for when page directories are pages and have no
 *    additional constraints.
 *
 *    See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
 *
 *  - tlb_remove_page() / __tlb_remove_page()
 *  - tlb_remove_page_size() / __tlb_remove_page_size()
 *
 *    __tlb_remove_page_size() is the basic primitive that queues a page for
 *    freeing. __tlb_remove_page() assumes PAGE_SIZE. Both return a boolean
 *    indicating whether the queue is (now) full and a call to
 *    tlb_flush_mmu() is required.
 *
 *    tlb_remove_page() and tlb_remove_page_size() imply the call to
 *    tlb_flush_mmu() when required and have no return value.
 *
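 *    The expected calling pattern for the double-underscore variants is
 *    therefore (sketch):
 *
 *	if (__tlb_remove_page(tlb, page))
 *		tlb_flush_mmu(tlb);
 *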
 *  - tlb_change_page_size()
 *
 *    call before __tlb_remove_page*() to set the current page-size; implies a
 *    possible tlb_flush_mmu() call.
 *
 *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
 *
 *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
 *                              related state, like the range)
 *
 *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
 *			whatever pages are still batched.
 *
 *  - mmu_gather::fullmm
 *
 *    A flag set by tlb_gather_mmu() to indicate we're going to free
 *    the entire mm; this allows a number of optimizations.
 *
 *    - We can ignore tlb_{start,end}_vma(); because we don't
 *      care about ranges. Everything will be shot down.
 *
 *    - (RISC) architectures that use ASIDs can cycle to a new ASID
 *      and delay the invalidation until ASID space runs out.
 *
 *  - mmu_gather::need_flush_all
 *
 *    A flag that can be set by the arch code if it wants to force a
 *    flush of the entire TLB irrespective of the range. For instance
 *    x86-PAE needs this when changing top-level entries.
 *
 * And allows the architecture to provide and implement tlb_flush():
 *
 * tlb_flush() may, in addition to the above-mentioned mmu_gather fields, make
 * use of:
 *
 *  - mmu_gather::start / mmu_gather::end
 *
 *    which provides the range that needs to be flushed to cover the pages to
 *    be freed.
 *
 *  - mmu_gather::freed_tables
 *
 *    set when we freed page table pages
 *
 *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
 *
 *    returns the smallest TLB entry size unmapped in this range.
 *
 * If an architecture does not provide tlb_flush() a default implementation
 * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
 * specified, in which case we'll default to flush_tlb_mm().
 *
 * Additionally there are a few opt-in features:
 *
 *  MMU_GATHER_PAGE_SIZE
 *
 *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
 *  changes the size and provides mmu_gather::page_size to tlb_flush().
 *
 *  This might be useful if your architecture has size-specific TLB
 *  invalidation instructions.
 *
 *  MMU_GATHER_TABLE_FREE
 *
 *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
 *  for page directories (__p*_free_tlb()).
 *
 *  Useful if your architecture has non-page page directories.
 *
 *  When used, an architecture is expected to provide __tlb_remove_table()
 *  which does the actual freeing of these pages.
 *
 *  MMU_GATHER_RCU_TABLE_FREE
 *
 *  Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
 *  comment below).
 *
 *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
 *  and therefore doesn't naturally serialize with software page-table walkers.
 *
 *  MMU_GATHER_NO_RANGE
 *
 *  Use this if your architecture lacks an efficient flush_tlb_range().
 *
 *  MMU_GATHER_NO_GATHER
 *
 *  If this option is set, the mmu_gather does not track individual pages for
 *  delayed page freeing. A platform that enables the option needs to
 *  provide its own implementation of the __tlb_remove_page_size() function to
 *  free pages.
 *
 *  This is useful if your architecture already flushes TLB entries in the
 *  various ptep_get_and_clear() functions.
 */

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

struct mmu_table_batch {
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
	struct rcu_head		rcu;
#endif
	unsigned int		nr;
	void			*tables[];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
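
/*
 * Worked example (assuming a typical LP64 build with 4 KiB pages): with
 * CONFIG_MMU_GATHER_RCU_TABLE_FREE, sizeof(struct mmu_table_batch) is 24
 * bytes (16-byte rcu_head + 4-byte nr + padding), so MAX_TABLE_BATCH =
 * (4096 - 24) / 8 = 509 table pointers per batch page.
 */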

extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

/*
 * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page-based
 * page directories and we can use the normal page batching to free them.
 */
#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
/*
 * This allows an architecture whose hardware does not walk the Linux
 * page-tables to skip the TLB invalidate when freeing page tables.
 */
#ifndef tlb_needs_table_invalidate
#define tlb_needs_table_invalidate() (true)
#endif
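
/*
 * e.g. (sketch) such an architecture can opt out of the extra invalidate
 * with
 *
 *	#define tlb_needs_table_invalidate()	(false)
 *
 * in its asm/tlb.h, before this header is included.
 */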

#else

#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */


#ifndef CONFIG_MMU_GATHER_NO_GATHER
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce the risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
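
/*
 * Worked example (same LP64/4 KiB assumptions as above): struct
 * mmu_gather_batch is 16 bytes, so MAX_GATHER_BATCH = (4096 - 16) / 8 = 510
 * and MAX_GATHER_BATCH_COUNT = 10000 / 510 = 19 batches, i.e. roughly 9700
 * pages freed per forced flush.
 */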

extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				   int page_size);
#endif

/*
 * struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;

#ifdef CONFIG_MMU_GATHER_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif

	unsigned long		start;
	unsigned long		end;
	/*
	 * we are in the middle of an operation to clear
	 * a full mm and can make some optimizations
	 */
	unsigned int		fullmm : 1;

	/*
	 * we have performed an operation which
	 * requires a complete flush of the tlb
	 */
	unsigned int		need_flush_all : 1;

	/*
	 * we have removed page directories
	 */
	unsigned int		freed_tables : 1;

	/*
	 * at which levels have we cleared entries?
	 */
	unsigned int		cleared_ptes : 1;
	unsigned int		cleared_pmds : 1;
	unsigned int		cleared_puds : 1;
	unsigned int		cleared_p4ds : 1;

	/*
	 * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
	 */
	unsigned int		vma_exec : 1;
	unsigned int		vma_huge : 1;

	unsigned int		batch_count;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	unsigned int page_size;
#endif
#endif
};

void tlb_flush_mmu(struct mmu_gather *tlb);

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}
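
/*
 * e.g. (with 4 KiB pages) starting from a freshly reset range
 * (start = TASK_SIZE, end = 0), __tlb_adjust_range(tlb, 0x1000, PAGE_SIZE)
 * leaves [start, end) = [0x1000, 0x2000); a second call for address 0x5000
 * grows it to [0x1000, 0x6000).
 */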

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
	tlb->freed_tables = 0;
	tlb->cleared_ptes = 0;
	tlb->cleared_pmds = 0;
	tlb->cleared_puds = 0;
	tlb->cleared_p4ds = 0;
	/*
	 * Do not reset mmu_gather::vma_* fields here, we do not
	 * call into tlb_start_vma() again to set them if there is an
	 * intermediate flush.
	 */
}

#ifdef CONFIG_MMU_GATHER_NO_RANGE

#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
#endif

/*
 * When an architecture does not have an efficient means of flushing TLBs by
 * range there is no point in doing intermediate flushes on tlb_end_vma() to
 * keep the range small. We equally don't have to worry about page
 * granularity or other things.
 *
 * All we need to do is issue a full flush for any !0 range.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->end)
		flush_tlb_mm(tlb->mm);
}

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#define tlb_end_vma tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#else /* CONFIG_MMU_GATHER_NO_RANGE */

#ifndef tlb_flush

#if defined(tlb_start_vma) || defined(tlb_end_vma)
#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
#endif

/*
 * When an architecture does not provide its own tlb_flush() implementation
 * but does have a reasonably efficient flush_tlb_range() implementation
 * use that.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		struct vm_area_struct vma = {
			.vm_mm = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/*
	 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
	 * mips-4k) flush only large pages.
	 *
	 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
	 * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
	 * range.
	 *
	 * We rely on tlb_end_vma() to issue a flush, such that when we reset
	 * these values the batch is empty.
	 */
	tlb->vma_huge = is_vm_hugetlb_page(vma);
	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
}

#else

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#endif

#endif /* CONFIG_MMU_GATHER_NO_RANGE */

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	/*
	 * Anything calling __tlb_adjust_range() also sets at least one of
	 * these bits.
	 */
	if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
	      tlb->cleared_puds || tlb->cleared_p4ds))
		return;

	tlb_flush(tlb);
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
	__tlb_reset_range(tlb);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

static inline void tlb_change_page_size(struct mmu_gather *tlb,
						     unsigned int page_size)
{
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm && !tlb->need_flush_all)
			tlb_flush_mmu(tlb);
	}

	tlb->page_size = page_size;
#endif
}
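
/*
 * Illustration (sketch, assuming THP): a caller that has been zapping
 * 4 KiB ptes and is about to zap a huge pmd would do
 *
 *	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
 *
 * which, with CONFIG_MMU_GATHER_PAGE_SIZE, first flushes the queued 4 KiB
 * pages so that tlb_flush() only ever sees a single page size.
 */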

static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;

	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}
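
/*
 * e.g. if only cleared_pmds is set, tlb_get_unmap_shift() is PMD_SHIFT and
 * tlb_get_unmap_size() is 1UL << PMD_SHIFT (2 MiB with 4 KiB base pages);
 * an architecture's tlb_flush() can use this as the stride for ranged
 * invalidation instructions.
 */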

/*
 * For the tlb vma handling, we can optimise these away when we're doing a
 * full MM flush. When we're doing a munmap, the vmas are adjusted to only
 * cover the region to be torn down.
 */
#ifndef tlb_start_vma
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	tlb_update_vma_flags(tlb, vma);
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
}
#endif

#ifndef tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	/*
	 * Do a TLB flush and reset the range at VMA boundaries; this avoids
	 * the ranges growing with the unused space between consecutive VMAs,
	 * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
	 * this.
	 */
	tlb_flush_mmu_tlbonly(tlb);
}
#endif

/*
 * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
 * and set corresponding cleared_*.
 */
static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_ptes = 1;
}

static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_pmds = 1;
}

static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_puds = 1;
}

static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_p4ds = 1;
}

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that ptes were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate.  This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb_flush_pte_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
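
/*
 * Typical use, loosely after mm/memory.c:zap_pte_range() (sketch, error
 * and accounting details elided):
 *
 *	ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
 *	tlb_remove_tlb_entry(tlb, pte, addr);
 *	if (__tlb_remove_page(tlb, page))
 *		...		// batch full: break out and tlb_flush_mmu()
 */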

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		unsigned long _sz = huge_page_size(h);		\
		if (_sz == PMD_SIZE)				\
			tlb_flush_pmd_range(tlb, address, _sz);	\
		else if (_sz == PUD_SIZE)			\
			tlb_flush_pud_range(tlb, address, _sz);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE);	\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)

/**
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE);	\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)

/*
 * For things like page table caches (i.e. caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page table caches behind it. This
 * is definitely how x86 works, for example. And if you have an
 * architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion.
 *
 * So if we ever find an architecture
 * that would want something that odd, I think it is up to that
 * architecture to do its own odd thing, not cause pain for others.
 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 *
 * For now, w.r.t. the page table cache, mark the range_size as PAGE_SIZE.
 */

#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		tlb_flush_pmd_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
#endif
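
/*
 * The matching __pte_free_tlb() comes from the architecture; e.g. a sketch
 * modelled on arm64's version:
 *
 *	static inline void __pte_free_tlb(struct mmu_gather *tlb,
 *					  pgtable_t pte, unsigned long addr)
 *	{
 *		pgtable_pte_page_dtor(pte);
 *		tlb_remove_table(tlb, pte);
 *	}
 */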

#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		tlb_flush_pud_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
#endif

#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		tlb_flush_p4d_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, p4dp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__p4d_free_tlb(tlb, p4dp, address);		\
	} while (0)
#endif

#endif /* CONFIG_MMU */

#endif /* _ASM_GENERIC__TLB_H */