/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 ARM Ltd.
 */
#ifndef __ASM_MTE_KASAN_H
#define __ASM_MTE_KASAN_H

#include <asm/mte-def.h>

#ifndef __ASSEMBLY__

#include <linux/types.h>

#ifdef CONFIG_ARM64_MTE

/*
 * These functions are meant to be used only from the KASAN runtime, through
 * the arch_*() interface defined in asm/memory.h.
 * They deliberately omit system_supports_mte() checks, as KASAN only calls
 * them when MTE is supported and enabled.
 */

static inline u8 mte_get_ptr_tag(void *ptr)
{
	/* Note: The format of KASAN tags is 0xF<x>. */
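	/*
	 * Illustrative example (not from the original source): a pointer
	 * whose top byte is 0x07 yields (u64)ptr >> MTE_TAG_SHIFT == 0x07,
	 * and ORing with 0xF0 gives the KASAN tag 0xF7.
	 */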
	u8 tag = 0xF0 | (u8)(((u64)(ptr)) >> MTE_TAG_SHIFT);

	return tag;
}

/* Get allocation tag for the address. */
static inline u8 mte_get_mem_tag(void *addr)
{
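	/*
	 * LDG loads the allocation tag of the granule containing addr into
	 * the tag bits of addr itself; mte_get_ptr_tag() then extracts it.
	 */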
	asm(__MTE_PREAMBLE "ldg %0, [%0]"
	    : "+r" (addr));

	return mte_get_ptr_tag(addr);
}

/* Generate a random tag. */
static inline u8 mte_get_random_tag(void)
{
	void *addr;

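	/*
	 * IRG inserts a randomly generated logical address tag into addr;
	 * only the resulting tag bits are consumed below, so the initial
	 * (uninitialized) register contents do not matter.
	 */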
	asm(__MTE_PREAMBLE "irg %0, %0"
	    : "=r" (addr));

	return mte_get_ptr_tag(addr);
}

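/*
 * Set the allocation tag for one MTE granule and post-increment the address
 * by 16 bytes (MTE_GRANULE_SIZE), matching the STG post-index form.
 */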
static inline u64 __stg_post(u64 p)
{
	asm volatile(__MTE_PREAMBLE "stg %0, [%0], #16"
		     : "+r"(p)
		     :
		     : "memory");
	return p;
}

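/*
 * As __stg_post(), but STZG also zero-initializes the data in the granule.
 */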
static inline u64 __stzg_post(u64 p)
{
	asm volatile(__MTE_PREAMBLE "stzg %0, [%0], #16"
		     : "+r"(p)
		     :
		     : "memory");
	return p;
}

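/*
 * Set the allocation tags for a whole DC ZVA-sized block in one operation;
 * the block size comes from DCZID_EL0 (see mte_set_mem_tag_range() below).
 */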
static inline void __dc_gva(u64 p)
{
	asm volatile(__MTE_PREAMBLE "dc gva, %0" : : "r"(p) : "memory");
}

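/*
 * As __dc_gva(), but DC GZVA also zeroes the data in the block.
 */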
static inline void __dc_gzva(u64 p)
{
	asm volatile(__MTE_PREAMBLE "dc gzva, %0" : : "r"(p) : "memory");
}

/*
 * Assign allocation tags for a region of memory based on the pointer tag.
 * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned, and the
 * size must be a multiple of MTE_GRANULE_SIZE.
 */
static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag,
					 bool init)
{
	u64 curr, mask, dczid, dczid_bs, dczid_dzp, end1, end2, end3;

	/* Read DC G(Z)VA block size from the system register. */
	dczid = read_cpuid(DCZID_EL0);
	dczid_bs = 4ul << (dczid & 0xf);
	dczid_dzp = (dczid >> 4) & 1;

	curr = (u64)__tag_set(addr, tag);
	mask = dczid_bs - 1;
	/* STG/STZG up to the end of the first block. */
	end1 = curr | mask;
	end3 = curr + size;
	/* DC GVA / GZVA in [end1, end2) */
	end2 = end3 & ~mask;
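	/*
	 * Illustrative walk-through (hypothetical values): with dczid_bs == 64
	 * (mask == 63), curr == 0xe030 and size == 0x90, we get end1 == 0xe03f,
	 * end2 == 0xe0c0 and end3 == 0xe0c0: the first STG loop covers
	 * [0xe030, 0xe040), DC GVA covers [0xe040, 0xe0c0), and the trailing
	 * STG loop has nothing left to do.
	 */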

	/*
	 * The following code uses STG on the first DC GVA block even if the
	 * start address is aligned - it appears to be faster than an alignment
	 * check plus a conditional branch. Also, if the range size is at least
	 * 2 DC GVA blocks, the first two loops can use post-test (do-while)
	 * conditions, saving one branch each. If DCZID_EL0.DZP is set, DC
	 * G(Z)VA is prohibited and the plain STG loop covers the whole range.
	 */
#define SET_MEMTAG_RANGE(stg_post, dc_gva)				\
	do {								\
		if (!dczid_dzp && size >= 2 * dczid_bs) {		\
			do {						\
				curr = stg_post(curr);			\
			} while (curr < end1);				\
									\
			do {						\
				dc_gva(curr);				\
				curr += dczid_bs;			\
			} while (curr < end2);				\
		}							\
									\
		while (curr < end3)					\
			curr = stg_post(curr);				\
	} while (0)

	if (init)
		SET_MEMTAG_RANGE(__stzg_post, __dc_gzva);
	else
		SET_MEMTAG_RANGE(__stg_post, __dc_gva);
#undef SET_MEMTAG_RANGE
}
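
/*
 * Sketch of a typical call (illustrative, not from this file): the KASAN
 * runtime tags an object's granules via something like
 *
 *	mte_set_mem_tag_range(object, size, mte_get_random_tag(), init);
 *
 * with object and size both MTE_GRANULE_SIZE aligned, as required above.
 */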

void mte_enable_kernel_sync(void);
void mte_enable_kernel_async(void);

void mte_set_report_once(bool state);
bool mte_report_once(void);

#else /* CONFIG_ARM64_MTE */

static inline u8 mte_get_ptr_tag(void *ptr)
{
	return 0xFF;
}

static inline u8 mte_get_mem_tag(void *addr)
{
	return 0xFF;
}

static inline u8 mte_get_random_tag(void)
{
	return 0xFF;
}

static inline void mte_set_mem_tag_range(void *addr, size_t size,
					 u8 tag, bool init)
{
}

static inline void mte_enable_kernel_sync(void)
{
}

static inline void mte_enable_kernel_async(void)
{
}

static inline void mte_set_report_once(bool state)
{
}

static inline bool mte_report_once(void)
{
	return false;
}

#endif /* CONFIG_ARM64_MTE */

#endif /* __ASSEMBLY__ */

#endif /* __ASM_MTE_KASAN_H */