// SPDX-License-Identifier: GPL-2.0-only
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/nospec-branch.h>
#include <asm/cache.h>
#include <asm/apic.h>

#include "mm_internal.h"

#ifdef CONFIG_PARAVIRT
# define STATIC_NOPV
#else
# define STATIC_NOPV			static
# define __flush_tlb_local		native_flush_tlb_local
# define __flush_tlb_global		native_flush_tlb_global
# define __flush_tlb_one_user(addr)	native_flush_tlb_one_user(addr)
# define __flush_tlb_others(msk, info)	native_flush_tlb_others(msk, info)
#endif

/*
 * TLB flushing, formerly SMP-only
 *		c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway).
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 * More scalable flush, from Andi Kleen
 *
 * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

/*
 * Use bit 0 to mangle the TIF_SPEC_IB state into the mm pointer which is
 * stored in cpu_tlbstate.last_user_mm_ibpb.
 */
#define LAST_USER_MM_IBPB	0x1UL

/*
 * The x86 feature is called PCID (Process Context IDentifier). It is similar
 * to what is traditionally called ASID on the RISC processors.
 *
 * We don't use the traditional ASID implementation, where each process/mm gets
 * its own ASID and flush/restart when we run out of ASID space.
 *
 * Instead we have a small per-cpu array of ASIDs and cache the last few mm's
 * that came by on this CPU, allowing cheaper switch_mm between processes on
 * this CPU.
 *
 * We end up with different spaces for different things. To avoid confusion we
 * use different names for each of them:
 *
 * ASID  - [0, TLB_NR_DYN_ASIDS-1]
 *         the canonical identifier for an mm
 *
 * kPCID - [1, TLB_NR_DYN_ASIDS]
 *         the value we write into the PCID part of CR3; corresponds to the
 *         ASID+1, because PCID 0 is special.
 *
 * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS]
 *         for KPTI each mm has two address spaces and thus needs two
 *         PCID values, but we can still do with a single ASID denomination
 *         for each mm. Corresponds to kPCID + 2048.
 *
 */
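
/*
 * Illustrative mapping between the three naming spaces above (a sketch,
 * assuming the usual TLB_NR_DYN_ASIDS of 6 and the KPTI user bit at
 * 2^11 == 2048):
 *
 *   ASID 0 -> kPCID 1 -> uPCID 2049
 *   ASID 5 -> kPCID 6 -> uPCID 2054
 *
 * i.e. kern_pcid(asid) == asid + 1 and user_pcid(asid) == asid + 1 + 2048.
 */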

/* There are 12 bits of space for ASIDs in CR3 */
#define CR3_HW_ASID_BITS		12

/*
 * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for
 * user/kernel switches
 */
#ifdef CONFIG_PAGE_TABLE_ISOLATION
# define PTI_CONSUMED_PCID_BITS	1
#else
# define PTI_CONSUMED_PCID_BITS	0
#endif

#define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS)

/*
 * ASIDs are zero-based: 0->MAX_ASID_AVAILABLE are valid. -1 below to account
 * for them being zero-based. Another -1 is because PCID 0 is reserved for
 * use by non-PCID-aware users.
 */
#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_PCID_BITS) - 2)
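
/*
 * Worked example (a sketch): assuming X86_CR3_PCID_BITS == 12, per the
 * comment above, and KPTI enabled, CR3_AVAIL_PCID_BITS == 11, so
 * MAX_ASID_AVAILABLE == (1 << 11) - 2 == 2046. One slot is lost to the
 * zero-based counting, the other to reserved PCID 0.
 */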

/*
 * Given @asid, compute kPCID
 */
static inline u16 kern_pcid(u16 asid)
{
	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);

#ifdef CONFIG_PAGE_TABLE_ISOLATION
	/*
	 * Make sure that the dynamic ASID space does not conflict with the
	 * bit we are using to switch between user and kernel ASIDs.
	 */
	BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT));

	/*
	 * The ASID being passed in here should have respected the
	 * MAX_ASID_AVAILABLE and thus never have the switch bit set.
	 */
	VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT));
#endif
	/*
	 * The dynamically-assigned ASIDs that get passed in are small
	 * (<TLB_NR_DYN_ASIDS). They never have the high switch bit set,
	 * so do not bother to clear it.
	 *
	 * If PCID is on, ASID-aware code paths put the ASID+1 into the
	 * PCID bits. This serves two purposes. It prevents a nasty
	 * situation in which PCID-unaware code saves CR3, loads some other
	 * value (with PCID == 0), and then restores CR3, thus corrupting
	 * the TLB for ASID 0 if the saved ASID was nonzero. It also means
	 * that any bugs involving loading a PCID-enabled CR3 with
	 * CR4.PCIDE off will trigger deterministically.
	 */
	return asid + 1;
}

/*
 * Given @asid, compute uPCID
 */
static inline u16 user_pcid(u16 asid)
{
	u16 ret = kern_pcid(asid);
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	ret |= 1 << X86_CR3_PTI_PCID_USER_BIT;
#endif
	return ret;
}

static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
{
	if (static_cpu_has(X86_FEATURE_PCID)) {
		return __sme_pa(pgd) | kern_pcid(asid);
	} else {
		VM_WARN_ON_ONCE(asid != 0);
		return __sme_pa(pgd);
	}
}

static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
{
	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
	/*
	 * Use boot_cpu_has() instead of this_cpu_has() as this function
	 * might be called during early boot. This should work even after
	 * boot because all CPUs have the same capabilities:
	 */
	VM_WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_PCID));
	return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
}
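
/*
 * Illustrative CR3 layout for the two builders above (a sketch; bit
 * positions assume 64-bit mode with PCID enabled):
 *
 *   bit 63      bits 62..12             bits 11..0
 *   NOFLUSH     page-table phys addr    PCID
 *
 * So build_cr3(pgd, 5) yields __sme_pa(pgd) | 6, and the _noflush variant
 * additionally sets bit 63 (CR3_NOFLUSH) so the CPU keeps the TLB entries
 * tagged with that PCID instead of flushing them on the CR3 write.
 */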

/*
 * We get here when we do something requiring a TLB invalidation
 * but could not go invalidate all of the contexts. We do the
 * necessary invalidation by clearing out the 'ctx_id' which
 * forces a TLB flush when the context is loaded.
 */
static void clear_asid_other(void)
{
	u16 asid;

	/*
	 * This is only expected to be set if we have disabled
	 * kernel _PAGE_GLOBAL pages.
	 */
	if (!static_cpu_has(X86_FEATURE_PTI)) {
		WARN_ON_ONCE(1);
		return;
	}

	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
		/* Do not need to flush the current asid */
		if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid))
			continue;
		/*
		 * Make sure the next time we go to switch to
		 * this asid, we do a flush:
		 */
		this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0);
	}
	this_cpu_write(cpu_tlbstate.invalidate_other, false);
}

atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);


static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
			    u16 *new_asid, bool *need_flush)
{
	u16 asid;

	if (!static_cpu_has(X86_FEATURE_PCID)) {
		*new_asid = 0;
		*need_flush = true;
		return;
	}

	if (this_cpu_read(cpu_tlbstate.invalidate_other))
		clear_asid_other();

	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
		if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) !=
		    next->context.ctx_id)
			continue;

		*new_asid = asid;
		*need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) <
			       next_tlb_gen);
		return;
	}

	/*
	 * We don't currently own an ASID slot on this CPU.
	 * Allocate a slot.
	 */
	*new_asid = this_cpu_add_return(cpu_tlbstate.next_asid, 1) - 1;
	if (*new_asid >= TLB_NR_DYN_ASIDS) {
		*new_asid = 0;
		this_cpu_write(cpu_tlbstate.next_asid, 1);
	}
	*need_flush = true;
}
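
/*
 * Worked example of the slot allocation above (illustrative, assuming
 * TLB_NR_DYN_ASIDS == 6): if next_asid was 6, this_cpu_add_return() returns
 * 7, so *new_asid computes to 6, which is out of range and wraps to slot 0
 * with next_asid reset to 1. The slots thus form a small per-CPU
 * round-robin cache of recently used mm's; eviction always forces a flush.
 */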

/*
 * Given an ASID, flush the corresponding user ASID. We can delay this
 * until the next time we switch to it.
 *
 * See SWITCH_TO_USER_CR3.
 */
static inline void invalidate_user_asid(u16 asid)
{
	/* There is no user ASID if address space separation is off */
	if (!IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		return;

	/*
	 * We only have a single ASID if PCID is off and the CR3
	 * write will have flushed it.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_PCID))
		return;

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	__set_bit(kern_pcid(asid),
		  (unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask));
}
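
/*
 * Sketch of the deferred flush (an illustrative description of the
 * mechanism, not a literal code-path listing): invalidate_user_asid(3)
 * sets bit kern_pcid(3) == 4 in the per-CPU user_pcid_flush_mask. On the
 * next return to user space with that ASID, the SWITCH_TO_USER_CR3 exit
 * assembly tests and clears the bit and, if it was set, performs a
 * flushing CR3 write instead of a CR3_NOFLUSH one.
 */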

static void load_new_mm_cr3(pgd_t *pgdir, u16 new_asid, bool need_flush)
{
	unsigned long new_mm_cr3;

	if (need_flush) {
		invalidate_user_asid(new_asid);
		new_mm_cr3 = build_cr3(pgdir, new_asid);
	} else {
		new_mm_cr3 = build_cr3_noflush(pgdir, new_asid);
	}

	/*
	 * Caution: many callers of this function expect
	 * that load_cr3() is serializing and orders TLB
	 * fills with respect to the mm_cpumask writes.
	 */
	write_cr3(new_mm_cr3);
}

void leave_mm(int cpu)
{
	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);

	/*
	 * It's plausible that we're in lazy TLB mode while our mm is init_mm.
	 * If so, our callers still expect us to flush the TLB, but there
	 * aren't any user TLB entries in init_mm to worry about.
	 *
	 * This needs to happen before any other sanity checks due to
	 * intel_idle's shenanigans.
	 */
	if (loaded_mm == &init_mm)
		return;

	/* Warn if we're not lazy. */
	WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy));

	switch_mm(NULL, &init_mm, NULL);
}
EXPORT_SYMBOL_GPL(leave_mm);

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}

static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
{
	unsigned long next_tif = task_thread_info(next)->flags;
	unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB;

	return (unsigned long)next->mm | ibpb;
}
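
/*
 * Worked example (a sketch, with a made-up address): mm_struct pointers
 * come from the slab allocator and are at least word-aligned, so bit 0 is
 * always free for mangling. If next->mm == 0xffff888100234000 and the task
 * has TIF_SPEC_IB set, the mangled value is 0xffff888100234001; with the
 * flag clear it is the bare pointer. Comparing mangled values therefore
 * detects a change of either the mm or the TIF_SPEC_IB state in a single
 * compare.
 */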

static void cond_ibpb(struct task_struct *next)
{
	if (!next || !next->mm)
		return;

	/*
	 * Both the conditional and the always IBPB mode use the mm
	 * pointer to avoid the IBPB when switching between tasks of the
	 * same process. Using the mm pointer instead of mm->context.ctx_id
	 * opens a hypothetical hole vs. mm_struct reuse, which is more or
	 * less impossible to control by an attacker. Aside from that, it
	 * would only affect the first schedule so the theoretically
	 * exposed data is not really interesting.
	 */
	if (static_branch_likely(&switch_mm_cond_ibpb)) {
		unsigned long prev_mm, next_mm;

		/*
		 * This is a bit more complex than the always mode because
		 * it has to handle two cases:
		 *
		 * 1) Switch from a user space task (potential attacker)
		 *    which has TIF_SPEC_IB set to a user space task
		 *    (potential victim) which has TIF_SPEC_IB not set.
		 *
		 * 2) Switch from a user space task (potential attacker)
		 *    which has TIF_SPEC_IB not set to a user space task
		 *    (potential victim) which has TIF_SPEC_IB set.
		 *
		 * This could be done by unconditionally issuing IBPB when
		 * a task which has TIF_SPEC_IB set is either scheduled in
		 * or out. Though that results in two flushes when:
		 *
		 * - the same user space task is scheduled out and later
		 *   scheduled in again and only a kernel thread ran in
		 *   between.
		 *
		 * - a user space task belonging to the same process is
		 *   scheduled in after a kernel thread ran in between
		 *
		 * - a user space task belonging to the same process is
		 *   scheduled in immediately.
		 *
		 * Optimize this with reasonably small overhead for the
		 * above cases. Mangle the TIF_SPEC_IB bit into the mm
		 * pointer of the incoming task which is stored in
		 * cpu_tlbstate.last_user_mm_ibpb for comparison.
		 */
		next_mm = mm_mangle_tif_spec_ib(next);
		prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_ibpb);

		/*
		 * Issue IBPB only if the mm's are different and one or
		 * both have the IBPB bit set.
		 */
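		/*
		 * Decision table for the check below (illustrative; A and B
		 * are distinct mm's, +/- is the TIF_SPEC_IB bit):
		 *
		 *   prev   next   IBPB?
		 *   A+     A+     no   (same mangled value)
		 *   A+     A-     yes  (case 1 above)
		 *   A-     A+     yes  (case 2 above)
		 *   A-     B-     no   (different mm, no IB bit set)
		 *   A+     B-     yes  (attacker scheduled out)
		 */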
		if (next_mm != prev_mm &&
		    (next_mm | prev_mm) & LAST_USER_MM_IBPB)
			indirect_branch_prediction_barrier();

		this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, next_mm);
	}

	if (static_branch_unlikely(&switch_mm_always_ibpb)) {
		/*
		 * Only flush when switching to a user space task with a
		 * different context than the user space task which ran
		 * last on this CPU.
		 */
		if (this_cpu_read(cpu_tlbstate.last_user_mm) != next->mm) {
			indirect_branch_prediction_barrier();
			this_cpu_write(cpu_tlbstate.last_user_mm, next->mm);
		}
	}
}

#ifdef CONFIG_PERF_EVENTS
static inline void cr4_update_pce_mm(struct mm_struct *mm)
{
	if (static_branch_unlikely(&rdpmc_always_available_key) ||
	    (!static_branch_unlikely(&rdpmc_never_available_key) &&
	     atomic_read(&mm->context.perf_rdpmc_allowed)))
		cr4_set_bits_irqsoff(X86_CR4_PCE);
	else
		cr4_clear_bits_irqsoff(X86_CR4_PCE);
}

void cr4_update_pce(void *ignored)
{
	cr4_update_pce_mm(this_cpu_read(cpu_tlbstate.loaded_mm));
}

#else
static inline void cr4_update_pce_mm(struct mm_struct *mm) { }
#endif

void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
	u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
	bool was_lazy = this_cpu_read(cpu_tlbstate.is_lazy);
	unsigned cpu = smp_processor_id();
	u64 next_tlb_gen;
	bool need_flush;
	u16 new_asid;

	/*
	 * NB: The scheduler will call us with prev == next when switching
	 * from lazy TLB mode to normal mode if active_mm isn't changing.
	 * When this happens, we don't assume that CR3 (and hence
	 * cpu_tlbstate.loaded_mm) matches next.
	 *
	 * NB: leave_mm() calls us with prev == NULL and tsk == NULL.
	 */

	/* We don't want flush_tlb_func_* to run concurrently with us. */
	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
		WARN_ON_ONCE(!irqs_disabled());

	/*
	 * Verify that CR3 is what we think it is. This will catch
	 * hypothetical buggy code that directly switches to swapper_pg_dir
	 * without going through leave_mm() / switch_mm_irqs_off() or that
	 * does something like write_cr3(read_cr3_pa()).
	 *
	 * Only do this check if CONFIG_DEBUG_VM=y because __read_cr3()
	 * isn't free.
	 */
#ifdef CONFIG_DEBUG_VM
	if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev->pgd, prev_asid))) {
		/*
		 * If we were to BUG here, we'd be very likely to kill
		 * the system so hard that we don't see the call trace.
		 * Try to recover instead by ignoring the error and doing
		 * a global flush to minimize the chance of corruption.
		 *
		 * (This is far from being a fully correct recovery.
		 *  Architecturally, the CPU could prefetch something
		 *  back into an incorrect ASID slot and leave it there
		 *  to cause trouble down the road. It's better than
		 *  nothing, though.)
		 */
		__flush_tlb_all();
	}
#endif
	this_cpu_write(cpu_tlbstate.is_lazy, false);

	/*
	 * The membarrier system call requires a full memory barrier and
	 * core serialization before returning to user-space, after
	 * storing to rq->curr, when changing mm. This is because
	 * membarrier() sends IPIs to all CPUs that are in the target mm
	 * to make them issue memory barriers. However, if another CPU
	 * switches to/from the target mm concurrently with
	 * membarrier(), it can cause that CPU not to receive an IPI
	 * when it really should issue a memory barrier. Writing to CR3
	 * provides that full memory barrier and core serializing
	 * instruction.
	 */
	if (real_prev == next) {
		VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
			   next->context.ctx_id);

		/*
		 * Even in lazy TLB mode, the CPU should stay set in the
		 * mm_cpumask. The TLB shootdown code can figure out from
		 * cpu_tlbstate.is_lazy whether or not to send an IPI.
		 */
		if (WARN_ON_ONCE(real_prev != &init_mm &&
				 !cpumask_test_cpu(cpu, mm_cpumask(next))))
			cpumask_set_cpu(cpu, mm_cpumask(next));

		/*
		 * If the CPU is not in lazy TLB mode, we are just switching
		 * from one thread in a process to another thread in the same
		 * process. No TLB flush required.
		 */
		if (!was_lazy)
			return;

		/*
		 * Read the tlb_gen to check whether a flush is needed.
		 * If the TLB is up to date, just use it.
		 * The barrier synchronizes with the tlb_gen increment in
		 * the TLB shootdown code.
		 */
		smp_mb();
		next_tlb_gen = atomic64_read(&next->context.tlb_gen);
		if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) ==
		    next_tlb_gen)
			return;

		/*
		 * TLB contents went out of date while we were in lazy
		 * mode. Fall through to the TLB switching code below.
		 */
		new_asid = prev_asid;
		need_flush = true;
	} else {
		/*
		 * Avoid user/user BTB poisoning by flushing the branch
		 * predictor when switching between processes. This stops
		 * one process from doing Spectre-v2 attacks on another.
		 */
		cond_ibpb(tsk);

		/*
		 * Stop remote flushes for the previous mm.
		 * Skip kernel threads; we never send init_mm TLB flushing IPIs,
		 * but the bitmap manipulation can cause cache line contention.
		 */
		if (real_prev != &init_mm) {
			VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu,
					mm_cpumask(real_prev)));
			cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
		}

		/*
		 * Start remote flushes and then read tlb_gen.
		 */
		if (next != &init_mm)
			cpumask_set_cpu(cpu, mm_cpumask(next));
		next_tlb_gen = atomic64_read(&next->context.tlb_gen);

		choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);

		/* Let nmi_uaccess_okay() know that we're changing CR3. */
		this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
		barrier();
	}

	if (need_flush) {
		this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
		this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
		load_new_mm_cr3(next->pgd, new_asid, true);

		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
	} else {
		/* The new ASID is already up to date. */
		load_new_mm_cr3(next->pgd, new_asid, false);

		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);
	}

	/* Make sure we write CR3 before loaded_mm. */
	barrier();

	this_cpu_write(cpu_tlbstate.loaded_mm, next);
	this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);

	if (next != real_prev) {
		cr4_update_pce_mm(next);
		switch_ldt(real_prev, next);
	}
}

/*
 * Please ignore the name of this function. It should be called
 * switch_to_kernel_thread().
 *
 * enter_lazy_tlb() is a hint from the scheduler that we are entering a
 * kernel thread or other context without an mm. Acceptable implementations
 * include doing nothing whatsoever, switching to init_mm, or various clever
 * lazy tricks to try to minimize TLB flushes.
 *
 * The scheduler reserves the right to call enter_lazy_tlb() several times
 * in a row. It will notify us that we're going back to a real mm by
 * calling switch_mm_irqs_off().
 */
void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
		return;

	this_cpu_write(cpu_tlbstate.is_lazy, true);
}
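
/*
 * Example scenario (illustrative): task A runs, then a kernel thread is
 * scheduled. enter_lazy_tlb() marks the CPU lazy but leaves A's page
 * tables in CR3; no flush happens. If a thread of A runs next,
 * switch_mm_irqs_off() sees real_prev == next and only flushes if A's
 * tlb_gen advanced while we were lazy.
 */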

/*
 * Call this when reinitializing a CPU. It fixes the following potential
 * problems:
 *
 * - The ASID changed from what cpu_tlbstate thinks it is (most likely
 *   because the CPU was taken down and came back up with CR3's PCID
 *   bits clear). CPU hotplug can do this.
 *
 * - The TLB contains junk in slots corresponding to inactive ASIDs.
 *
 * - The CPU went so far out to lunch that it may have missed a TLB
 *   flush.
 */
void initialize_tlbstate_and_flush(void)
{
	int i;
	struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	u64 tlb_gen = atomic64_read(&init_mm.context.tlb_gen);
	unsigned long cr3 = __read_cr3();

	/* Assert that CR3 already references the right mm. */
	WARN_ON((cr3 & CR3_ADDR_MASK) != __pa(mm->pgd));

	/*
	 * Assert that CR4.PCIDE is set if needed. (CR4.PCIDE initialization
	 * doesn't work like other CR4 bits because it can only be set from
	 * long mode.)
	 */
	WARN_ON(boot_cpu_has(X86_FEATURE_PCID) &&
		!(cr4_read_shadow() & X86_CR4_PCIDE));

	/* Force ASID 0 and force a TLB flush. */
	write_cr3(build_cr3(mm->pgd, 0));

	/* Reinitialize tlbstate. */
	this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, LAST_USER_MM_IBPB);
	this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
	this_cpu_write(cpu_tlbstate.next_asid, 1);
	this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
	this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, tlb_gen);

	for (i = 1; i < TLB_NR_DYN_ASIDS; i++)
		this_cpu_write(cpu_tlbstate.ctxs[i].ctx_id, 0);
}

/*
 * flush_tlb_func_common()'s memory ordering requirement is that any
 * TLB fills that happen after we flush the TLB are ordered after we
 * read active_mm's tlb_gen. We don't need any explicit barriers
 * because all x86 flush operations are serializing and the
 * atomic64_read operation won't be reordered by the compiler.
 */
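/*
 * Illustrative ordering sketch (hedged; roughly how a flusher drives this
 * path): the flusher first bumps mm->context.tlb_gen (inc_mm_tlb_gen())
 * and then sends the flush request, and this function reads tlb_gen before
 * flushing. Because INVLPG and CR3/CR4 writes are serializing, a TLB fill
 * observed after our flush cannot have been satisfied by a translation
 * read before it.
 */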
static void flush_tlb_func_common(const struct flush_tlb_info *f,
				  bool local, enum tlb_flush_reason reason)
{
	/*
	 * We have three different tlb_gen values in here. They are:
	 *
	 * - mm_tlb_gen:     the latest generation.
	 * - local_tlb_gen:  the generation that this CPU has already caught
	 *                   up to.
	 * - f->new_tlb_gen: the generation that the requester of the flush
	 *                   wants us to catch up to.
	 */
	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
	u64 mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
	u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);

	/* This code cannot presently handle being reentered. */
	VM_WARN_ON(!irqs_disabled());

	if (unlikely(loaded_mm == &init_mm))
		return;

	VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
		   loaded_mm->context.ctx_id);

	if (this_cpu_read(cpu_tlbstate.is_lazy)) {
		/*
		 * We're in lazy mode. We need to at least flush our
		 * paging-structure cache to avoid speculatively reading
		 * garbage into our TLB. Since switching to init_mm is barely
		 * slower than a minimal flush, just switch to init_mm.
		 *
		 * This should be rare, with native_flush_tlb_others skipping
		 * IPIs to lazy TLB mode CPUs.
		 */
		switch_mm_irqs_off(NULL, &init_mm, NULL);
		return;
	}

	if (unlikely(local_tlb_gen == mm_tlb_gen)) {
		/*
		 * There's nothing to do: we're already up to date. This can
		 * happen if two concurrent flushes happen -- the first flush to
		 * be handled can catch us all the way up, leaving no work for
		 * the second flush.
		 */
		trace_tlb_flush(reason, 0);
		return;
	}

	WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen);
	WARN_ON_ONCE(f->new_tlb_gen > mm_tlb_gen);

	/*
	 * If we get to this point, we know that our TLB is out of date.
	 * This does not strictly imply that we need to flush (it's
	 * possible that f->new_tlb_gen <= local_tlb_gen), but we're
	 * going to need to flush in the very near future, so we might
	 * as well get it over with.
	 *
	 * The only question is whether to do a full or partial flush.
	 *
	 * We do a partial flush if requested and two extra conditions
	 * are met:
	 *
	 * 1. f->new_tlb_gen == local_tlb_gen + 1. We have an invariant that
	 *    we've always done all needed flushes to catch up to
	 *    local_tlb_gen. If, for example, local_tlb_gen == 2 and
	 *    f->new_tlb_gen == 3, then we know that the flush needed to bring
	 *    us up to date for tlb_gen 3 is the partial flush we're
	 *    processing.
	 *
	 *    As an example of why this check is needed, suppose that there
	 *    are two concurrent flushes. The first is a full flush that
	 *    changes context.tlb_gen from 1 to 2. The second is a partial
	 *    flush that changes context.tlb_gen from 2 to 3. If they get
	 *    processed on this CPU in reverse order, we'll see
	 *    local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL.
	 *    If we were to use __flush_tlb_one_user() and set local_tlb_gen to
	 *    3, we'd break the invariant: we'd update local_tlb_gen above
	 *    1 without the full flush that's needed for tlb_gen 2.
	 *
	 * 2. f->new_tlb_gen == mm_tlb_gen. This is purely an optimization.
	 *    Partial TLB flushes are not all that much cheaper than full TLB
	 *    flushes, so it seems unlikely that it would be a performance win
	 *    to do a partial flush if that won't bring our TLB fully up to
	 *    date. By doing a full flush instead, we can increase
	 *    local_tlb_gen all the way to mm_tlb_gen and we can probably
	 *    avoid another flush in the very near future.
	 */
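	/*
	 * Condensed decision example (illustrative): local_tlb_gen == 2,
	 * mm_tlb_gen == 3, f->new_tlb_gen == 3 and f->end != TLB_FLUSH_ALL
	 * satisfies both conditions, so the ranged flush below runs. The
	 * same request arriving with local_tlb_gen == 1 fails condition 1
	 * and falls through to the full flush.
	 */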
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) if (f->end != TLB_FLUSH_ALL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) f->new_tlb_gen == local_tlb_gen + 1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) f->new_tlb_gen == mm_tlb_gen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) /* Partial flush */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) unsigned long nr_invalidate = (f->end - f->start) >> f->stride_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) unsigned long addr = f->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) while (addr < f->end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) flush_tlb_one_user(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) addr += 1UL << f->stride_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) if (local)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_invalidate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) trace_tlb_flush(reason, nr_invalidate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) /* Full flush. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) flush_tlb_local();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) if (local)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) trace_tlb_flush(reason, TLB_FLUSH_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) /* Both paths above update our state to mm_tlb_gen. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) }
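
/*
 * Editor's illustrative sketch (hypothetical helper, not part of the
 * original file): the partial-vs-full decision above reduced to a pure
 * predicate over the three generation numbers involved.
 */
static bool __maybe_unused example_can_flush_partially(unsigned long end,
                                                       u64 new_tlb_gen,
                                                       u64 local_tlb_gen,
                                                       u64 mm_tlb_gen)
{
        return end != TLB_FLUSH_ALL &&
               new_tlb_gen == local_tlb_gen + 1 &&     /* condition 1 above */
               new_tlb_gen == mm_tlb_gen;              /* condition 2 above */
}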
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) const struct flush_tlb_info *f = info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) flush_tlb_func_common(f, true, reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) static void flush_tlb_func_remote(void *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) const struct flush_tlb_info *f = info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) inc_irq_stat(irq_tlb_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) static bool tlb_is_not_lazy(int cpu, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) return !per_cpu(cpu_tlbstate.is_lazy, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) STATIC_NOPV void native_flush_tlb_others(const struct cpumask *cpumask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) const struct flush_tlb_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) if (info->end == TLB_FLUSH_ALL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) trace_tlb_flush(TLB_REMOTE_SEND_IPI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) (info->end - info->start) >> PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) * If no page tables were freed, we can skip sending IPIs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) * CPUs in lazy TLB mode. They will flush the TLB themselves
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) * at the next context switch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * However, if page tables are getting freed, we need to send the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) * IPI everywhere, to prevent CPUs in lazy TLB mode from tripping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) * up on the new contents of what used to be page tables, while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * doing a speculative memory access.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (info->freed_tables)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) smp_call_function_many(cpumask, flush_tlb_func_remote,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) (void *)info, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func_remote,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) (void *)info, 1, cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) void flush_tlb_others(const struct cpumask *cpumask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) const struct flush_tlb_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) __flush_tlb_others(cpumask, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) * See Documentation/x86/tlb.rst for details. We choose 33
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) * because it is large enough to cover the vast majority (at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) * least 95%) of allocations, and is small enough that we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) * confident it will not cause too much overhead. Each single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * flush is about 100 ns, so this caps the maximum overhead at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * _about_ 3,000 ns.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) * This is in units of pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
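
/*
 * Editor's illustrative sketch (hypothetical helper): the heuristic this
 * ceiling feeds, as used by flush_tlb_mm_range() below. Up to 33 entries
 * are flushed one by one (33 * ~100 ns ~= 3,300 ns in the worst case);
 * a larger range, or TLB_FLUSH_ALL, is promoted to one full flush.
 */
static bool __maybe_unused example_wants_full_flush(unsigned long start,
                                                    unsigned long end,
                                                    unsigned int stride_shift)
{
        return end == TLB_FLUSH_ALL ||
               ((end - start) >> stride_shift) > tlb_single_page_flush_ceiling;
}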
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) static DEFINE_PER_CPU_SHARED_ALIGNED(struct flush_tlb_info, flush_tlb_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) #ifdef CONFIG_DEBUG_VM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) static DEFINE_PER_CPU(unsigned int, flush_tlb_info_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) static inline struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) unsigned long start, unsigned long end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) unsigned int stride_shift, bool freed_tables,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) u64 new_tlb_gen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) struct flush_tlb_info *info = this_cpu_ptr(&flush_tlb_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) #ifdef CONFIG_DEBUG_VM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * Ensure that the following code is non-reentrant and flush_tlb_info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) * is not overwritten. This relies on no TLB flushing being initiated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * from interrupt handlers or machine-check exception handlers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) BUG_ON(this_cpu_inc_return(flush_tlb_info_idx) != 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) info->start = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) info->end = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) info->mm = mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) info->stride_shift = stride_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) info->freed_tables = freed_tables;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) info->new_tlb_gen = new_tlb_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) return info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) static inline void put_flush_tlb_info(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) #ifdef CONFIG_DEBUG_VM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) /* Complete reentrancy prevention checks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) this_cpu_dec(flush_tlb_info_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) }
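
/*
 * Editor's illustrative sketch (hypothetical caller): the discipline the
 * two helpers above impose. There is a single flush_tlb_info slot per
 * CPU, so get/put must nest strictly and run with preemption disabled;
 * flush_tlb_mm_range() and flush_tlb_kernel_range() below both follow
 * this pattern.
 */
static void __maybe_unused example_flush_info_discipline(struct mm_struct *mm)
{
        struct flush_tlb_info *info;

        preempt_disable();
        info = get_flush_tlb_info(mm, 0, TLB_FLUSH_ALL, 0, false,
                                  inc_mm_tlb_gen(mm));
        flush_tlb_others(mm_cpumask(mm), info);
        put_flush_tlb_info();
        preempt_enable();
}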
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) unsigned long end, unsigned int stride_shift,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) bool freed_tables)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) struct flush_tlb_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) u64 new_tlb_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) cpu = get_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) /* Should we flush just the requested range? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if ((end == TLB_FLUSH_ALL) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) ((end - start) >> stride_shift) > tlb_single_page_flush_ceiling) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) end = TLB_FLUSH_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) /* This is also a barrier that synchronizes with switch_mm(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) new_tlb_gen = inc_mm_tlb_gen(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) info = get_flush_tlb_info(mm, start, end, stride_shift, freed_tables,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) new_tlb_gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) lockdep_assert_irqs_enabled();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) flush_tlb_func_local(info, TLB_LOCAL_MM_SHOOTDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) flush_tlb_others(mm_cpumask(mm), info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) put_flush_tlb_info();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) put_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) }
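
/*
 * Editor's illustrative sketch (hypothetical caller): flushing one 2MB
 * huge page via flush_tlb_mm_range(). Passing stride_shift == PMD_SHIFT
 * lets the partial-flush path issue a single invalidation for the whole
 * range rather than 512 4k-sized ones.
 */
static void __maybe_unused example_flush_huge_page(struct mm_struct *mm,
                                                   unsigned long addr)
{
        unsigned long start = addr & PMD_MASK;

        flush_tlb_mm_range(mm, start, start + PMD_SIZE, PMD_SHIFT, false);
}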
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) static void do_flush_tlb_all(void *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) __flush_tlb_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) void flush_tlb_all(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) on_each_cpu(do_flush_tlb_all, NULL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) static void do_kernel_range_flush(void *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) struct flush_tlb_info *f = info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) unsigned long addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) /* Flush the range one page at a time with 'invlpg' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) flush_tlb_one_kernel(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) void flush_tlb_kernel_range(unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) /* Balance as for a user space task's flush, a bit conservatively */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (end == TLB_FLUSH_ALL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) on_each_cpu(do_flush_tlb_all, NULL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) struct flush_tlb_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) info = get_flush_tlb_info(NULL, start, end, 0, false, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) on_each_cpu(do_kernel_range_flush, info, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) put_flush_tlb_info();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
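
/*
 * Editor's illustrative sketch (hypothetical caller): a two-page kernel
 * mapping flush stays well under tlb_single_page_flush_ceiling and so
 * takes the per-page invlpg path above; a multi-megabyte range would be
 * promoted to a full flush on every CPU instead.
 */
static void __maybe_unused example_flush_kernel_window(unsigned long va)
{
        flush_tlb_kernel_range(va, va + 2 * PAGE_SIZE);
}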
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * This can be used from process context to figure out what the value of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) * CR3 is without needing to do a (slow) __read_cr3().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) * It's intended to be used for code like KVM that sneakily changes CR3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) * and needs to restore it. It needs to be used very carefully.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) unsigned long __get_current_cr3_fast(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) this_cpu_read(cpu_tlbstate.loaded_mm_asid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) /* For now, be very restrictive about when this can be called. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) VM_WARN_ON(in_nmi() || preemptible());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) VM_BUG_ON(cr3 != __read_cr3());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) return cr3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) EXPORT_SYMBOL_GPL(__get_current_cr3_fast);
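
/*
 * Editor's illustrative sketch (hypothetical and heavily simplified):
 * the save/switch/restore pattern __get_current_cr3_fast() exists for.
 * Preemption must remain disabled across the whole sequence, and these
 * plain CR3 writes flush non-global TLB entries; a real user such as
 * KVM takes considerably more care.
 */
static void __maybe_unused example_borrow_cr3(unsigned long temp_cr3)
{
        unsigned long saved_cr3 = __get_current_cr3_fast();

        native_write_cr3(temp_cr3);     /* run briefly on a different CR3 */
        native_write_cr3(saved_cr3);    /* put the original back */
}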
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) * Flush one page in the kernel mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) void flush_tlb_one_kernel(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) * If PTI is off, then __flush_tlb_one_user() is just INVLPG or its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) * paravirt equivalent. Even with PCID, this is sufficient: we only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * use PCID if we also use global PTEs for the kernel mapping, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) * INVLPG flushes global translations across all address spaces.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) * If PTI is on, then the kernel is mapped with non-global PTEs, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * __flush_tlb_one_user() will flush the given address for the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) * kernel address space and for its usermode counterpart, but it does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) * not flush it for other address spaces.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) flush_tlb_one_user(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) if (!static_cpu_has(X86_FEATURE_PTI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) * See above. We need to propagate the flush to all other address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) * spaces. In principle, we only need to propagate it to kernelmode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) * address spaces, but the extra bookkeeping we would need is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) * worth it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) this_cpu_write(cpu_tlbstate.invalidate_other, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) }
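
/*
 * Editor's illustrative sketch (hypothetical caller): the usual pattern
 * around flush_tlb_one_kernel() - update one kernel PTE, then flush the
 * single affected address. Note that this flushes only the local CPU;
 * a change that must be visible everywhere needs flush_tlb_kernel_range().
 */
static void __maybe_unused example_update_kernel_pte(pte_t *ptep, pte_t pte,
                                                     unsigned long vaddr)
{
        set_pte(ptep, pte);
        flush_tlb_one_kernel(vaddr);
}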
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) * Flush one page in the user mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) STATIC_NOPV void native_flush_tlb_one_user(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) if (!static_cpu_has(X86_FEATURE_PTI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) * Some platforms #GP if we call invpcid(type=1/2) before CR4.PCIDE=1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) * Just use invalidate_user_asid() in case we are called early.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) invalidate_user_asid(loaded_mm_asid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) invpcid_flush_one(user_pcid(loaded_mm_asid), addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) void flush_tlb_one_user(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) __flush_tlb_one_user(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * Flush everything
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) STATIC_NOPV void native_flush_tlb_global(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) unsigned long cr4, flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) if (static_cpu_has(X86_FEATURE_INVPCID)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) * Using INVPCID is considerably faster than a pair of writes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) * to CR4 sandwiched inside an IRQ flag save/restore.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) * Note, this works with CR4.PCIDE=0 or 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) invpcid_flush_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) * Read-modify-write to CR4 - protect it from preemption and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) * from interrupts. (Use the raw variant because this code can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * be called from deep inside debugging code.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) raw_local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) cr4 = this_cpu_read(cpu_tlbstate.cr4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) /* toggle PGE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) native_write_cr4(cr4 ^ X86_CR4_PGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) /* write old PGE again and flush TLBs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) native_write_cr4(cr4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) raw_local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
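
/*
 * Editor's worked illustration (not in the original file) of the XOR
 * pair above: whichever way CR4.PGE starts out, both writes change it,
 * and the CPU flushes the entire TLB, global entries included, on every
 * PGE transition:
 *
 *      PGE=1: write(cr4 ^ PGE) -> PGE=0 (flush), write(cr4) -> PGE=1 (flush)
 *      PGE=0: write(cr4 ^ PGE) -> PGE=1 (flush), write(cr4) -> PGE=0 (flush)
 *
 * CR4 ends up exactly as it was found.
 */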
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) * Flush the entire current user mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) STATIC_NOPV void native_flush_tlb_local(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) * Preemption or interrupts must be disabled to protect the access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) * to the per CPU variable and to prevent being preempted between
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) * read_cr3() and write_cr3().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) WARN_ON_ONCE(preemptible());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) /* If current->mm == NULL then the read_cr3() "borrows" an mm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) native_write_cr3(__native_read_cr3());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) void flush_tlb_local(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) __flush_tlb_local();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) * Flush everything
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) void __flush_tlb_all(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) * This is to catch users with preemption enabled and the PGE feature,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) * who would otherwise not trigger the warning in native_flush_tlb_local().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) VM_WARN_ON_ONCE(preemptible());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) if (boot_cpu_has(X86_FEATURE_PGE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) __flush_tlb_global();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) * !PGE -> !PCID (setup_pcid()), thus every flush is total.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) flush_tlb_local();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) EXPORT_SYMBOL_GPL(__flush_tlb_all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) * arch_tlbbatch_flush() performs a full TLB flush regardless of the active mm.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) * This means that the 'struct flush_tlb_info' that describes which mappings to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) * flush is actually fixed. We therefore define a single fixed struct and use it in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) * arch_tlbbatch_flush().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) static const struct flush_tlb_info full_flush_tlb_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) .mm = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) .start = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) .end = TLB_FLUSH_ALL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) int cpu = get_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) if (cpumask_test_cpu(cpu, &batch->cpumask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) lockdep_assert_irqs_enabled();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) flush_tlb_func_local(&full_flush_tlb_info, TLB_LOCAL_SHOOTDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) flush_tlb_others(&batch->cpumask, &full_flush_tlb_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) cpumask_clear(&batch->cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) put_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) * Blindly accessing user memory from NMI context can be dangerous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) * if we're in the middle of switching the current user task or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) * switching the loaded mm. It can also be dangerous if we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) * interrupted some kernel code that was temporarily using a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) * different mm.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) bool nmi_uaccess_okay(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) struct mm_struct *current_mm = current->mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) VM_WARN_ON_ONCE(!loaded_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) * The condition we want to check is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) * is supposed to be reasonably fast.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) * Instead, we check the almost equivalent but somewhat conservative
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) * condition below, and we rely on the fact that switch_mm_irqs_off()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) if (loaded_mm != current_mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
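
/*
 * Editor's illustrative sketch (hypothetical helper) of how an NMI-time
 * user access is gated on the check above, modeled on the real
 * copy_from_user_nmi(). The return value is the number of bytes NOT
 * copied, so bailing out reports the whole buffer as uncopied.
 */
static unsigned long __maybe_unused example_nmi_copy(void *to,
                                                     const void __user *from,
                                                     unsigned long n)
{
        unsigned long ret;

        if (!nmi_uaccess_okay())
                return n;

        pagefault_disable();
        ret = __copy_from_user_inatomic(to, from, n);
        pagefault_enable();

        return ret;
}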
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) char buf[32];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) len = sprintf(buf, "%lu\n", tlb_single_page_flush_ceiling);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) return simple_read_from_buffer(user_buf, count, ppos, buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) static ssize_t tlbflush_write_file(struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) const char __user *user_buf, size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) char buf[32];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) ssize_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) int ceiling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) len = min(count, sizeof(buf) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) if (copy_from_user(buf, user_buf, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) buf[len] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) if (kstrtoint(buf, 0, &ceiling))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if (ceiling < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) tlb_single_page_flush_ceiling = ceiling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) static const struct file_operations fops_tlbflush = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) .read = tlbflush_read_file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) .write = tlbflush_write_file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) .llseek = default_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) static int __init create_tlb_single_page_flush_ceiling(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) arch_debugfs_dir, NULL, &fops_tlbflush);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) late_initcall(create_tlb_single_page_flush_ceiling);
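
/*
 * Editor's usage note (assuming debugfs is mounted in the usual place):
 * the file created above appears as
 * /sys/kernel/debug/x86/tlb_single_page_flush_ceiling. Reading it
 * returns the current ceiling; writing a non-negative integer, e.g.
 * "echo 64 > tlb_single_page_flush_ceiling", replaces it.
 */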