/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>
#include <asm/invpcid.h>
#include <asm/pti.h>
#include <asm/processor-flags.h>

void __flush_tlb_all(void);

#define TLB_FLUSH_ALL	(-1UL)

void cr4_update_irqsoff(unsigned long set, unsigned long clear);
unsigned long cr4_read_shadow(void);

/* Set in this cpu's CR4. */
static inline void cr4_set_bits_irqsoff(unsigned long mask)
{
	cr4_update_irqsoff(mask, 0);
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits_irqsoff(unsigned long mask)
{
	cr4_update_irqsoff(0, mask);
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long flags;

	local_irq_save(flags);
	cr4_set_bits_irqsoff(mask);
	local_irq_restore(flags);
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long flags;

	local_irq_save(flags);
	cr4_clear_bits_irqsoff(mask);
	local_irq_restore(flags);
}
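
/*
 * Example (illustrative only): toggling a CR4 bit from a context where
 * IRQs may be enabled.  X86_CR4_TSD is defined in
 * <asm/processor-flags.h>; any CR4 bit is handled the same way:
 *
 *	cr4_set_bits(X86_CR4_TSD);	// make RDTSC fault in user mode
 *	...
 *	cr4_clear_bits(X86_CR4_TSD);	// allow user RDTSC again
 *
 * Callers that already run with IRQs disabled should use the
 * *_irqsoff() variants above and skip the redundant save/restore.
 */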

#ifndef MODULE
/*
 * 6 because 6 should be plenty and struct tlb_state will fit in two cache
 * lines.  (On 64-bit: ctxs[] is 6 * sizeof(struct tlb_context) = 96 bytes,
 * and the fields before it pack into 32, for 128 bytes total -- exactly
 * two 64-byte cache lines.)
 */
#define TLB_NR_DYN_ASIDS	6

struct tlb_context {
	u64 ctx_id;
	u64 tlb_gen;
};

struct tlb_state {
	/*
	 * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
	 * are on.  This means that it may not match current->active_mm,
	 * which will contain the previous user mm when we're in lazy TLB
	 * mode even if we've already switched back to swapper_pg_dir.
	 *
	 * During switch_mm_irqs_off(), loaded_mm will be set to
	 * LOADED_MM_SWITCHING during the brief interrupts-off window
	 * when CR3 and loaded_mm would otherwise be inconsistent.  This
	 * is for nmi_uaccess_okay()'s benefit.
	 */
	struct mm_struct *loaded_mm;

#define LOADED_MM_SWITCHING ((struct mm_struct *)1UL)

	/* Last user mm for optimizing IBPB */
	union {
		struct mm_struct	*last_user_mm;
		unsigned long		last_user_mm_ibpb;
	};

	u16 loaded_mm_asid;
	u16 next_asid;

	/*
	 * We can be in one of several states:
	 *
	 *  - Actively using an mm.  Our CPU's bit will be set in
	 *    mm_cpumask(loaded_mm) and is_lazy == false;
	 *
	 *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
	 *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
	 *
	 *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
	 *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
	 *    We're heuristically guessing that the CR3 load we
	 *    skipped more than makes up for the overhead added by
	 *    lazy mode.
	 */
	bool is_lazy;

	/*
	 * If set we changed the page tables in such a way that we
	 * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
	 * This tells us to go invalidate all the non-loaded ctxs[]
	 * on the next context switch.
	 *
	 * The current ctx was kept up-to-date as it ran and does not
	 * need to be invalidated.
	 */
	bool invalidate_other;

	/*
	 * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate
	 * the corresponding user PCID needs a flush next time we
	 * switch to it; see SWITCH_TO_USER_CR3.
	 */
	unsigned short user_pcid_flush_mask;

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;

	/*
	 * This is a list of all contexts that might exist in the TLB.
	 * There is one per ASID that we use, and the ASID (what the
	 * CPU calls PCID) is the index into ctxs[].
	 *
	 * For each context, ctx_id indicates which mm the TLB's user
	 * entries came from.  As an invariant, the TLB will never
	 * contain entries that are stale relative to the recorded
	 * tlb_gen: the TLB is always at least as up to date as the
	 * tlb_gen stored here for that mm.
	 *
	 * To be clear, this means that it's legal for the TLB code to
	 * flush the TLB without updating tlb_gen.  This can happen
	 * (for now, at least) due to paravirt remote flushes.
	 *
	 * NB: context 0 is a bit special, since it's also used by
	 * various bits of init code.  This is fine -- code that
	 * isn't aware of PCID will end up harmlessly flushing
	 * context 0.
	 */
	struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

bool nmi_uaccess_okay(void);
#define nmi_uaccess_okay nmi_uaccess_okay
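
/*
 * A sketch of how the per-CPU state is typically consulted (assumes a
 * preemption-safe context; see arch/x86/mm/tlb.c for the real users):
 *
 *	u16 asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
 *	u64 gen  = this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen);
 *
 * A flush is needed when gen lags behind the mm's current tlb_gen.
 */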
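
/*
 * Illustrative use: an NMI handler must check nmi_uaccess_okay() before
 * touching user memory, since CR3 may not match loaded_mm mid-switch:
 *
 *	if (nmi_uaccess_okay())
 *		bytes = copy_from_user_nmi(buf, from, n);
 *
 * copy_from_user_nmi() is shown as one plausible caller, not a
 * requirement of this interface.
 */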

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}
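
/*
 * Once the shadow is initialized, cr4_read_shadow() is the cheap way to
 * inspect CR4 without a register read, e.g. (illustrative):
 *
 *	if (cr4_read_shadow() & X86_CR4_PCIDE)
 *		...PCID is enabled on this CPU...
 */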

extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

extern void initialize_tlbstate_and_flush(void);

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
 *
 * ...but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
	/*
	 * We support several kinds of flushes.
	 *
	 * - Fully flush a single mm.  .mm will be set, .end will be
	 *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
	 *   which the IPI sender is trying to catch us up.
	 *
	 * - Partially flush a single mm.  .mm will be set, .start and
	 *   .end will indicate the range, and .new_tlb_gen will be set
	 *   such that the changes between generation .new_tlb_gen-1 and
	 *   .new_tlb_gen are entirely contained in the indicated range.
	 *
	 * - Fully flush all mms whose tlb_gens have been updated.  .mm
	 *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
	 *   will be zero.
	 */
	struct mm_struct	*mm;
	unsigned long		start;
	unsigned long		end;
	u64			new_tlb_gen;
	unsigned int		stride_shift;
	bool			freed_tables;
};
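
/*
 * For example, a full flush of a single mm (the first case above) would
 * be described roughly as follows.  This is a sketch of the encoding,
 * not the canonical construction code:
 *
 *	struct flush_tlb_info info = {
 *		.mm		= mm,
 *		.start		= 0,
 *		.end		= TLB_FLUSH_ALL,
 *		.new_tlb_gen	= inc_mm_tlb_gen(mm),
 *		.stride_shift	= 0,	// stride is moot for a full flush
 *		.freed_tables	= true,
 *	};
 *
 * This is roughly what flush_tlb_mm() below ends up requesting.
 */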

void flush_tlb_local(void);
void flush_tlb_one_user(unsigned long addr);
void flush_tlb_one_kernel(unsigned long addr);
void flush_tlb_others(const struct cpumask *cpumask,
		      const struct flush_tlb_info *info);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#define flush_tlb_mm(mm)						\
		flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)

#define flush_tlb_range(vma, start, end)				\
	flush_tlb_mm_range((vma)->vm_mm, start, end,			\
			   ((vma)->vm_flags & VM_HUGETLB)		\
				? huge_page_shift(hstate_vma(vma))	\
				: PAGE_SHIFT, false)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned int stride_shift,
				bool freed_tables);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
}
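
/*
 * Typical (illustrative) use after a single-PTE update:
 *
 *	ptep_set_access_flags(vma, address, ptep, entry, dirty);
 *	flush_tlb_page(vma, address);
 *
 * For multi-page updates, prefer flush_tlb_range() so the whole range
 * can be batched into one flush with the right stride.
 */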

static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
	/*
	 * Bump the generation count.  This also serves as a full barrier
	 * that synchronizes with switch_mm(): callers are required to order
	 * their read of mm_cpumask after their writes to the paging
	 * structures.
	 */
	return atomic64_inc_return(&mm->context.tlb_gen);
}
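
/*
 * The required ordering, sketched (pseudo-flow, not literal kernel code):
 *
 *	...modify page tables...
 *	new_gen = inc_mm_tlb_gen(mm);		// full barrier
 *	...read mm_cpumask(mm), send flush IPIs...
 *
 * Roughly: a CPU either appears in mm_cpumask and receives the IPI, or
 * it will observe new_gen and catch up when it next switches to the mm.
 */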

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm)
{
	inc_mm_tlb_gen(mm);
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}
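
/*
 * Sketch of the batching pattern (as used by the reclaim path in
 * mm/rmap.c, simplified here):
 *
 *	arch_tlbbatch_add_mm(&batch->arch, mm);	// accumulate target CPUs
 *	...unmap more pages, possibly from other mms...
 *	arch_tlbbatch_flush(&batch->arch);	// one flush for all of them
 */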

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);

#endif /* !MODULE */

#endif /* _ASM_X86_TLBFLUSH_H */