/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MMU_H
#define __ASM_MMU_H

#include <asm/cputype.h>

#define MMCF_AARCH32	0x1	/* mm context flag for AArch32 executables */
#define USER_ASID_BIT	48
#define USER_ASID_FLAG	(UL(1) << USER_ASID_BIT)
#define TTBR_ASID_MASK	(UL(0xffff) << 48)
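
/*
 * Layout note: the hardware ASID is 16 bits wide and lives in bits
 * [63:48] of the TTBR, which is what TTBR_ASID_MASK selects. With KPTI
 * (see arm64_kernel_unmapped_at_el0() below), ASIDs are managed in
 * pairs and USER_ASID_FLAG (TTBR bit 48, i.e. ASID bit 0) picks the
 * userspace half of the pair. As a rough, illustrative sketch:
 *
 *	ttbr &= ~TTBR_ASID_MASK;		// clear the old ASID field
 *	ttbr |= asid << USER_ASID_BIT;		// install the new ASID
 *	ttbr |= USER_ASID_FLAG;			// select the EL0 sibling
 */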

#ifndef __ASSEMBLY__

#include <linux/refcount.h>
#include <asm/cpufeature.h>

typedef struct {
	atomic64_t	id;		/* ASID (bits [15:0]) + allocator generation */
#ifdef CONFIG_COMPAT
	void		*sigpage;	/* AArch32 signal-return trampoline page */
#endif
	refcount_t	pinned;		/* pin count for a stable ASID */
	void		*vdso;		/* base of this mm's vDSO mapping */
	unsigned long	flags;		/* MMCF_AARCH32 etc. */
} mm_context_t;

/*
 * We use atomic64_read() here because the ASID for an 'mm_struct' can
 * be reallocated when scheduling one of its threads following a
 * rollover event (see new_context() and flush_context()). In this case,
 * a concurrent TLBI (e.g. via try_to_unmap_one() and ptep_clear_flush())
 * may use a stale ASID. This is fine in principle as the new ASID is
 * guaranteed to be clean in the TLB, but the TLBI routines have to take
 * care to handle the following race:
 *
 *    CPU 0                      CPU 1
 *
 *    // ptep_clear_flush(mm)
 *    xchg_relaxed(pte, 0)
 *    DSB ISHST
 *    old = ASID(mm)
 *         |                     <rollover>
 *         |                     new = new_context(mm)
 *         \-------------------> atomic64_set(mm->context.id, new)
 *                               cpu_switch_mm(mm)
 *                               // Hardware walk of pte using new ASID
 *    TLBI(old)
 *
 * In this scenario, the barrier on CPU 0 and the dependency on CPU 1
 * ensure that the page-table walker on CPU 1 *must* see the invalid PTE
 * written by CPU 0.
 */
#define ASID(mm)	(atomic64_read(&(mm)->context.id) & 0xffff)
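
/*
 * For illustration only: the consumer side of this contract, paraphrased
 * from the flush_tlb_page_nosync() pattern in <asm/tlbflush.h> (the
 * helpers named below live there, not in this header). The ASID is
 * sampled exactly once via ASID(mm) and folded into the TLBI operand;
 * if a rollover races with this, the TLBI merely targets a stale ASID,
 * which is harmless because the new ASID is clean and, per the race
 * above, the walker cannot have cached the dead PTE under it:
 *
 *	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
 *
 *	dsb(ishst);			// order the PTE update before TLBI
 *	__tlbi(vale1is, addr);		// last-level, inner-shareable invalidate
 *	__tlbi_user(vale1is, addr);	// KPTI: also hit the userspace ASID
 */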

static inline bool arm64_kernel_unmapped_at_el0(void)
{
	return cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
}
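
/*
 * Illustrative sketch, modelled on the __tlbi_user() helper in
 * <asm/tlbflush.h> (not defined here): when the kernel is unmapped at
 * EL0, each user-address TLBI is issued twice, once for the kernel-side
 * ASID and once with USER_ASID_FLAG set for the userspace sibling:
 *
 *	#define __tlbi_user(op, arg) do {			\
 *		if (arm64_kernel_unmapped_at_el0())		\
 *			__tlbi(op, (arg) | USER_ASID_FLAG);	\
 *	} while (0)
 */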

extern void arm64_memblock_init(void);
extern void paging_init(void);
extern void bootmem_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
extern void mark_linear_text_alias_ro(void);
extern bool kaslr_requires_kpti(void);

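/*
 * init_mm starts out on init_pg_dir; paging_init() later switches it
 * over to swapper_pg_dir once the final kernel page tables are live.
 */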
#define INIT_MM_CONTEXT(name)	\
	.pgd = init_pg_dir,

#endif	/* !__ASSEMBLY__ */
#endif