^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) #ifndef _ASM_X86_INVPCID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) #define _ASM_X86_INVPCID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4)
/*
 * Execute INVPCID with the given invalidation @type.
 *
 * @pcid: PCID selector for the descriptor (per the Intel SDM, only the
 *        low 12 bits are significant; ignored for the all-context types).
 * @addr: linear address to invalidate (used only by the
 *        individual-address type; ignored otherwise).
 * @type: one of the INVPCID_TYPE_* values.
 *
 * The instruction takes a 128-bit in-memory descriptor laid out as
 * { PCID, linear address }, which is what the two-u64 struct builds,
 * and the invalidation type in a register — hence the "m" and "r"
 * constraints below.
 */
static inline void __invpcid(unsigned long pcid, unsigned long addr,
			     unsigned long type)
{
	struct { u64 d[2]; } desc = { { pcid, addr } };

	/*
	 * The memory clobber is because the whole point is to invalidate
	 * stale TLB entries and, especially if we're flushing global
	 * mappings, we don't want the compiler to reorder any subsequent
	 * memory accesses before the TLB flush.
	 */
	asm volatile("invpcid %[desc], %[type]"
		     :: [desc] "m" (desc), [type] "r" (type) : "memory");
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19)
/* INVPCID invalidation types (Intel SDM, INVPCID instruction reference). */
#define INVPCID_TYPE_INDIV_ADDR 0	/* one address in one PCID */
#define INVPCID_TYPE_SINGLE_CTXT 1	/* one PCID, non-global entries */
#define INVPCID_TYPE_ALL_INCL_GLOBAL 2	/* all PCIDs, including globals */
#define INVPCID_TYPE_ALL_NON_GLOBAL 3	/* all PCIDs, excluding globals */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) /* Flush all mappings for a given pcid and addr, not including globals. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) static inline void invpcid_flush_one(unsigned long pcid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) __invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) /* Flush all mappings for a given PCID, not including globals. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) static inline void invpcid_flush_single_context(unsigned long pcid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) __invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) /* Flush all mappings, including globals, for all PCIDs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) static inline void invpcid_flush_all(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) __invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) /* Flush all mappings for all PCIDs except globals. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) static inline void invpcid_flush_all_nonglobals(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) __invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #endif /* _ASM_X86_INVPCID */