Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 tree for the Orange Pi 5, 5B, and 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /* SPDX-License-Identifier: GPL-2.0-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Based on arch/arm/include/asm/tlbflush.h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright (C) 1999-2003 Russell King
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * Copyright (C) 2012 ARM Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #ifndef __ASM_TLBFLUSH_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #define __ASM_TLBFLUSH_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #ifndef __ASSEMBLY__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/bitfield.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/mm_types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <asm/cputype.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <asm/mmu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20)  * Raw TLBI operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22)  * Where necessary, use the __tlbi() macro to avoid asm()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23)  * boilerplate. Drivers and most kernel code should use the TLB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24)  * management routines in preference to the macro below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26)  * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27)  * on whether a particular TLBI operation takes an argument or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28)  * not. The macros handles invoking the asm with or without the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29)  * register argument as appropriate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30)  */
/*
 * __TLBI_0: emit a TLBI instruction that takes no register operand.
 * 'arg' is accepted (so __TLBI_N can dispatch uniformly) but unused.
 * When ARM64_WORKAROUND_REPEAT_TLBI applies, the ALTERNATIVE patches
 * in a "dsb ish; tlbi" repetition of the same operation.
 */
#define __TLBI_0(op, arg) asm (ARM64_ASM_PREAMBLE			       \
			       "tlbi " #op "\n"				       \
		   ALTERNATIVE("nop\n			nop",		       \
			       "dsb ish\n		tlbi " #op,	       \
			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)	       \
			    : : )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 
/*
 * __TLBI_1: emit a TLBI instruction with a single register operand
 * ('arg', typically built by __TLBI_VADDR()/__TLBI_VADDR_RANGE()).
 * Same ARM64_WORKAROUND_REPEAT_TLBI treatment as __TLBI_0.
 */
#define __TLBI_1(op, arg) asm (ARM64_ASM_PREAMBLE			       \
			       "tlbi " #op ", %0\n"			       \
		   ALTERNATIVE("nop\n			nop",		       \
			       "dsb ish\n		tlbi " #op ", %0",     \
			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)	       \
			    : : "r" (arg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 
/* Dispatch helper: expands to __TLBI_0 or __TLBI_1 depending on 'n'. */
#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)

/*
 * __tlbi(op) or __tlbi(op, arg): the ##__VA_ARGS__ paste shifts the
 * trailing "1, 0" list so 'n' in __TLBI_N() resolves to 1 when an
 * argument was supplied and 0 when it was not.
 */
#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 
/*
 * Repeat the TLBI against the userspace ASID (USER_ASID_FLAG) when the
 * kernel is unmapped at EL0 (KPTI), where user and kernel mappings run
 * under separate ASIDs. No-op otherwise.
 */
#define __tlbi_user(op, arg) do {						\
	if (arm64_kernel_unmapped_at_el0())					\
		__tlbi(op, (arg) | USER_ASID_FLAG);				\
} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 
/*
 * This macro creates a properly formatted VA operand for the TLBI:
 * bits [43:0] carry the address shifted right by 12 (the operand is
 * expressed in 4KiB units regardless of PAGE_SIZE), bits [63:48] the
 * ASID.
 */
#define __TLBI_VADDR(addr, asid)				\
	({							\
		unsigned long __ta = (addr) >> 12;		\
		__ta &= GENMASK_ULL(43, 0);			\
		__ta |= (unsigned long)(asid) << 48;		\
		__ta;						\
	})
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66)  * Get translation granule of the system, which is decided by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67)  * PAGE_SIZE.  Used by TTL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68)  *  - 4KB	: 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69)  *  - 16KB	: 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70)  *  - 64KB	: 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71)  */
#define TLBI_TTL_TG_4K		1	/* 4KiB translation granule */
#define TLBI_TTL_TG_16K		2	/* 16KiB translation granule */
#define TLBI_TTL_TG_64K		3	/* 64KiB translation granule */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) static inline unsigned long get_trans_granule(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 	switch (PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 	case SZ_4K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 		return TLBI_TTL_TG_4K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 	case SZ_16K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 		return TLBI_TTL_TG_16K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 	case SZ_64K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 		return TLBI_TTL_TG_64K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91)  * Level-based TLBI operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93)  * When ARMv8.4-TTL exists, TLBI operations take an additional hint for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94)  * the level at which the invalidation must take place. If the level is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95)  * wrong, no invalidation may take place. In the case where the level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96)  * cannot be easily determined, a 0 value for the level parameter will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97)  * perform a non-hinted invalidation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99)  * For Stage-2 invalidation, use the level values provided to that effect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)  * in asm/stage2_pgtable.h.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)  */
#define TLBI_TTL_MASK		GENMASK_ULL(47, 44)	/* TTL hint field in the TLBI operand */

/*
 * Issue a TLBI with a level hint: when the CPU has ARMv8.4-TTL and
 * 'level' is non-zero, encode the translation granule and the level
 * (bottom two bits) into the operand's TTL field; otherwise issue the
 * plain, non-hinted invalidation with the operand unchanged.
 */
#define __tlbi_level(op, addr, level) do {				\
	u64 arg = addr;							\
									\
	if (cpus_have_const_cap(ARM64_HAS_ARMv8_4_TTL) &&		\
	    level) {							\
		u64 ttl = level & 3;					\
		ttl |= get_trans_granule() << 2;			\
		arg &= ~TLBI_TTL_MASK;					\
		arg |= FIELD_PREP(TLBI_TTL_MASK, ttl);			\
	}								\
									\
	__tlbi(op, arg);						\
} while(0)

/*
 * Level-hinted variant of __tlbi_user(): repeats the hinted TLBI for
 * the userspace ASID when KPTI is active, no-op otherwise.
 */
#define __tlbi_user_level(op, arg, level) do {				\
	if (arm64_kernel_unmapped_at_el0())				\
		__tlbi_level(op, (arg | USER_ASID_FLAG), level);	\
} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 
/*
 * This macro creates a properly formatted VA operand for the TLB RANGE.
 * The value bit assignments are:
 *
 * +----------+------+-------+-------+-------+----------------------+
 * |   ASID   |  TG  | SCALE |  NUM  |  TTL  |        BADDR         |
 * +----------+------+-------+-------+-------+----------------------+
 * |63      48|47  46|45   44|43   39|38   37|36                   0|
 *
 * The address range is determined by below formula:
 * [BADDR, BADDR + (NUM + 1) * 2^(5*SCALE + 1) * PAGESIZE)
 *
 * Note BADDR is in units of PAGE_SIZE (addr >> PAGE_SHIFT), unlike the
 * 4KiB units used by __TLBI_VADDR().
 */
#define __TLBI_VADDR_RANGE(addr, asid, scale, num, ttl)		\
	({							\
		unsigned long __ta = (addr) >> PAGE_SHIFT;	\
		__ta &= GENMASK_ULL(36, 0);			\
		__ta |= (unsigned long)(ttl) << 37;		\
		__ta |= (unsigned long)(num) << 39;		\
		__ta |= (unsigned long)(scale) << 44;		\
		__ta |= get_trans_granule() << 46;		\
		__ta |= (unsigned long)(asid) << 48;		\
		__ta;						\
	})
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 
/* These macros are used by the TLBI RANGE feature. */

/*
 * Pages covered by one ranged TLBI with the given (num, scale) pair:
 * (num + 1) * 2^(5*scale + 1). The maximum is num = 31, scale = 3.
 */
#define __TLBI_RANGE_PAGES(num, scale)	\
	((unsigned long)((num) + 1) << (5 * (scale) + 1))
#define MAX_TLBI_RANGE_PAGES		__TLBI_RANGE_PAGES(31, 3)

/*
 * Generate 'num' values from -1 to 30 with -1 rejected by the
 * __flush_tlb_range() loop below.
 */
#define TLBI_RANGE_MASK			GENMASK_ULL(4, 0)
#define __TLBI_RANGE_NUM(pages, scale)	\
	((((pages) >> (5 * (scale) + 1)) & TLBI_RANGE_MASK) - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)  *	TLB Invalidation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)  *	================
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)  * 	This header file implements the low-level TLB invalidation routines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)  *	(sometimes referred to as "flushing" in the kernel) for arm64.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)  *	Every invalidation operation uses the following template:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)  *	DSB ISHST	// Ensure prior page-table updates have completed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)  *	TLBI ...	// Invalidate the TLB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172)  *	DSB ISH		// Ensure the TLB invalidation has completed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)  *      if (invalidated kernel mappings)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)  *		ISB	// Discard any instructions fetched from the old mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177)  *	The following functions form part of the "core" TLB invalidation API,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)  *	as documented in Documentation/core-api/cachetlb.rst:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180)  *	flush_tlb_all()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)  *		Invalidate the entire TLB (kernel + user) on all CPUs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)  *	flush_tlb_mm(mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184)  *		Invalidate an entire user address space on all CPUs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185)  *		The 'mm' argument identifies the ASID to invalidate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)  *	flush_tlb_range(vma, start, end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188)  *		Invalidate the virtual-address range '[start, end)' on all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189)  *		CPUs for the user address space corresponding to 'vma->mm'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190)  *		Note that this operation also invalidates any walk-cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191)  *		entries associated with translations for the specified address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)  *		range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194)  *	flush_tlb_kernel_range(start, end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195)  *		Same as flush_tlb_range(..., start, end), but applies to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)  * 		kernel mappings rather than a particular user address space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197)  *		Whilst not explicitly documented, this function is used when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)  *		unmapping pages from vmalloc/io space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)  *	flush_tlb_page(vma, addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)  *		Invalidate a single user mapping for address 'addr' in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)  *		address space corresponding to 'vma->mm'.  Note that this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)  *		operation only invalidates a single, last-level page-table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204)  *		entry and therefore does not affect any walk-caches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)  *	Next, we have some undocumented invalidation routines that you probably
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208)  *	don't want to call unless you know what you're doing:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210)  *	local_flush_tlb_all()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)  *		Same as flush_tlb_all(), but only applies to the calling CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213)  *	__flush_tlb_kernel_pgtable(addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214)  *		Invalidate a single kernel mapping for address 'addr' on all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215)  *		CPUs, ensuring that any walk-cache entries associated with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216)  *		translation are also invalidated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218)  *	__flush_tlb_range(vma, start, end, stride, last_level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)  *		Invalidate the virtual-address range '[start, end)' on all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220)  *		CPUs for the user address space corresponding to 'vma->mm'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221)  *		The invalidation operations are issued at a granularity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222)  *		determined by 'stride' and only affect any walk-cache entries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)  *		if 'last_level' is equal to false.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226)  *	Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)  *	on top of these routines, since that is our interface to the mmu_gather
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228)  *	API as used by munmap() and friends.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229)  */
/*
 * Invalidate the entire TLB, but only on the calling CPU (non-shareable
 * barriers, non-IS TLBI). See flush_tlb_all() for the system-wide form.
 */
static inline void local_flush_tlb_all(void)
{
	dsb(nshst);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 
/*
 * Invalidate the entire TLB (kernel + user) on all CPUs in the Inner
 * Shareable domain. The trailing ISB discards any instructions already
 * fetched through stale translations.
 */
static inline void flush_tlb_all(void)
{
	dsb(ishst);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 
/*
 * Invalidate every TLB entry tagged with mm's ASID on all CPUs,
 * covering the user-ASID alias too when KPTI is active. No trailing
 * ISB: only user-space translations are invalidated here.
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid;

	dsb(ishst);
	/* ASID-only operand: address bits are irrelevant for aside1is. */
	asid = __TLBI_VADDR(0, ASID(mm));
	__tlbi(aside1is, asid);
	__tlbi_user(aside1is, asid);
	dsb(ish);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 
/*
 * Invalidate the single last-level entry for 'uaddr' in vma's address
 * space on all CPUs, without waiting for completion: there is no
 * trailing DSB, so callers must synchronise themselves - see
 * flush_tlb_page().
 */
static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
					 unsigned long uaddr)
{
	unsigned long addr;

	dsb(ishst);
	addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
	__tlbi(vale1is, addr);
	__tlbi_user(vale1is, addr);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 
/*
 * Synchronous single-page invalidation: the nosync variant plus the
 * DSB that waits for the TLBI to complete across the system.
 */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
	flush_tlb_page_nosync(vma, uaddr);
	dsb(ish);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 
/*
 * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
 * necessarily a performance improvement. Ranges needing more individual
 * TLBIs than this fall back to a full ASID or TLB flush.
 */
#define MAX_TLBI_OPS	PTRS_PER_PTE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 
/*
 * Invalidate the virtual range [start, end) for vma's address space,
 * issuing TLBIs at 'stride' granularity. 'last_level' selects leaf-only
 * operations (vale1is/rvale1is) that preserve walk-cache entries;
 * 'tlb_level' is the TTL hint for the level being invalidated (0 when
 * unknown). Overly large ranges fall back to flush_tlb_mm().
 */
static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     unsigned long stride, bool last_level,
				     int tlb_level)
{
	int num = 0;
	int scale = 0;
	unsigned long asid, addr, pages;

	start = round_down(start, stride);
	end = round_up(end, stride);
	pages = (end - start) >> PAGE_SHIFT;

	/*
	 * When the TLB range ops are not used, we can handle up to
	 * (MAX_TLBI_OPS - 1) pages;
	 * when the TLB range ops are used, we can handle up to
	 * (MAX_TLBI_RANGE_PAGES - 1) pages.
	 */
	if ((!system_supports_tlb_range() &&
	     (end - start) >= (MAX_TLBI_OPS * stride)) ||
	    pages >= MAX_TLBI_RANGE_PAGES) {
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	dsb(ishst);
	asid = ASID(vma->vm_mm);

	/*
	 * When the CPU does not support TLB range operations, flush the TLB
	 * entries one by one at the granularity of 'stride'. If the TLB
	 * range ops are supported, then:
	 *
	 * 1. If 'pages' is odd, flush the first page through non-range
	 *    operations;
	 *
	 * 2. For remaining pages: the minimum range granularity is decided
	 *    by 'scale', so multiple range TLBI operations may be required.
	 *    Start from scale = 0, flush the corresponding number of pages
	 *    ((num+1)*2^(5*scale+1) starting from 'addr'), then increase it
	 *    until no pages left.
	 *
	 * Note that certain ranges can be represented by either num = 31 and
	 * scale or num = 0 and scale + 1. The loop below favours the latter
	 * since num is limited to 30 by the __TLBI_RANGE_NUM() macro.
	 */
	while (pages > 0) {
		if (!system_supports_tlb_range() ||
		    pages % 2 == 1) {
			addr = __TLBI_VADDR(start, asid);
			if (last_level) {
				__tlbi_level(vale1is, addr, tlb_level);
				__tlbi_user_level(vale1is, addr, tlb_level);
			} else {
				__tlbi_level(vae1is, addr, tlb_level);
				__tlbi_user_level(vae1is, addr, tlb_level);
			}
			start += stride;
			pages -= stride >> PAGE_SHIFT;
			continue;
		}

		num = __TLBI_RANGE_NUM(pages, scale);
		if (num >= 0) {
			addr = __TLBI_VADDR_RANGE(start, asid, scale,
						  num, tlb_level);
			if (last_level) {
				__tlbi(rvale1is, addr);
				__tlbi_user(rvale1is, addr);
			} else {
				__tlbi(rvae1is, addr);
				__tlbi_user(rvae1is, addr);
			}
			start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT;
			pages -= __TLBI_RANGE_PAGES(num, scale);
		}
		scale++;
	}
	dsb(ish);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 
/*
 * Core-API entry point: invalidate [start, end) for vma's address
 * space at PAGE_SIZE granularity, including walk-cache entries.
 */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	/*
	 * We cannot use leaf-only invalidation here, since we may be invalidating
	 * table entries as part of collapsing hugepages or moving page tables.
	 * Set the tlb_level to 0 because we cannot get enough information here.
	 */
	__flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) 
/*
 * Invalidate the kernel-mapping range [start, end) on all CPUs,
 * falling back to a full TLB flush for very large ranges. The trailing
 * ISB discards instructions fetched through the old mapping.
 */
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if ((end - start) > (MAX_TLBI_OPS * PAGE_SIZE)) {
		flush_tlb_all();
		return;
	}

	/* Convert to TLBI operand encoding (VA >> 12, ASID 0). */
	start = __TLBI_VADDR(start, 0);
	end = __TLBI_VADDR(end, 0);

	dsb(ishst);
	/* One page in operand units is PAGE_SIZE >> 12 = 1 << (PAGE_SHIFT - 12). */
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		__tlbi(vaale1is, addr);
	dsb(ish);
	isb();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) 
/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd). Uses vaae1is (all ASIDs, non-leaf-inclusive),
 * unlike the leaf-only vale1is used by flush_tlb_page_nosync().
 */
static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
{
	unsigned long addr = __TLBI_VADDR(kaddr, 0);

	dsb(ishst);
	__tlbi(vaae1is, addr);
	dsb(ish);
	isb();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) #endif