^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) #ifndef _ASM_X86_MMU_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) #define _ASM_X86_MMU_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/rwsem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/mutex.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/atomic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * x86 has arch-specific MMU state beyond what lives in mm_struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) */
typedef struct {
	/*
	 * ctx_id uniquely identifies this mm_struct. A ctx_id will never
	 * be reused, and zero is not a valid ctx_id.
	 */
	u64 ctx_id;

	/*
	 * Any code that needs to do any sort of TLB flushing for this
	 * mm will first make its changes to the page tables, then
	 * increment tlb_gen, then flush. This lets the low-level
	 * flushing code keep track of what needs flushing.
	 *
	 * This is not used on Xen PV.
	 */
	atomic64_t tlb_gen;

#ifdef CONFIG_MODIFY_LDT_SYSCALL
	/*
	 * Custom LDT state for this mm, installed via the modify_ldt()
	 * syscall.  ldt is NULL when no custom LDT is installed.
	 * NOTE(review): presumably ldt_usr_sem serializes userspace
	 * LDT installation/teardown — confirm the exact locking rules
	 * against arch/x86/kernel/ldt.c.
	 */
	struct rw_semaphore ldt_usr_sem;
	struct ldt_struct *ldt;
#endif

#ifdef CONFIG_X86_64
	/* True if mm supports a task running in 32 bit compatibility mode. */
	unsigned short ia32_compat;
#endif

	/*
	 * Generic per-context mutex.  NOTE(review): exactly which fields
	 * this guards is not visible here — verify against the users of
	 * mm->context.lock before relying on it.
	 */
	struct mutex lock;
	void __user *vdso;			/* vdso base address */
	const struct vdso_image *vdso_image;	/* vdso image in use */

	atomic_t perf_rdpmc_allowed;	/* nonzero if rdpmc is allowed */
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/*
	 * One bit per protection key says whether userspace can
	 * use it or not.  Protected by mmap_lock.
	 */
	u16 pkey_allocation_map;
	/* Pkey reserved for execute-only mappings, or negative if none. */
	s16 execute_only_pkey;
#endif
} mm_context_t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
/*
 * Static initializer for init_mm's arch context.  ctx_id 1 marks the
 * initial mm (zero is invalid per the ctx_id contract above, and
 * presumably dynamically allocated ctx_ids start beyond 1 — confirm
 * against the ctx_id allocator).  The mutex needs a compile-time
 * initializer because init_mm exists before any runtime init can run.
 */
#define INIT_MM_CONTEXT(mm)						\
	.context = {							\
		.ctx_id = 1,						\
		.lock = __MUTEX_INITIALIZER(mm.context.lock),		\
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
/*
 * Drop this CPU's use of the current mm context.
 * NOTE(review): semantics live in the out-of-line definition
 * (arch/x86/mm/tlb.c in mainline) — confirm there.
 * The self-referential #define is the standard marker telling
 * generic/asm-generic code that this arch supplies its own
 * leave_mm() rather than the fallback.
 */
void leave_mm(int cpu);
#define leave_mm leave_mm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #endif /* _ASM_X86_MMU_H */