// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);

static atomic64_t asid_generation;
static unsigned long *asid_map;

/* ASID most recently installed on each CPU; cleared to 0 by a rollover */
static DEFINE_PER_CPU(atomic64_t, active_asids);
/* Snapshot of each CPU's ASID, preserved across a rollover */
static DEFINE_PER_CPU(u64, reserved_asids);
/* CPUs that still owe a local TLB flush after the last rollover */
static cpumask_t tlb_flush_pending;

static unsigned long max_pinned_asids;
static unsigned long nr_pinned_asids;
static unsigned long *pinned_asid_map;

#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION	(1UL << asid_bits)

#define NUM_USER_ASIDS		ASID_FIRST_VERSION
#define asid2idx(asid)		((asid) & ~ASID_MASK)
#define idx2asid(idx)		asid2idx(idx)
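
/*
 * Example layout, assuming 16-bit ASIDs (asid_bits == 16): mm->context.id
 * packs the rollover generation into bits [63:16] and the hardware ASID into
 * bits [15:0], so ASID_MASK == ~0xffff, ASID_FIRST_VERSION == NUM_USER_ASIDS
 * == 0x10000, and asid2idx()/idx2asid() simply strip the generation to index
 * the allocation bitmap.
 */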

/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
{
	u32 asid;
	int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
						       ID_AA64MMFR0_ASID_SHIFT);

	switch (fld) {
	default:
		pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
			smp_processor_id(), fld);
		fallthrough;
	case 0:
		asid = 8;
		break;
	case 2:
		asid = 16;
	}

	return asid;
}

/* Check whether the current CPU's ASIDBits is compatible with asid_bits */
void verify_cpu_asid_bits(void)
{
	u32 asid = get_cpu_asid_bits();

	if (asid < asid_bits) {
		/*
		 * We cannot decrease the ASID size at runtime, so panic if
		 * this CPU supports fewer ASID bits than the boot CPU.
		 */
		pr_crit("CPU%d: smaller ASID size (%u) than boot CPU (%u)\n",
			smp_processor_id(), asid, asid_bits);
		cpu_panic_kernel();
	}
}

static void set_kpti_asid_bits(unsigned long *map)
{
	unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long);
	/*
	 * With KPTI, kernel/user ASIDs are allocated in pairs and the
	 * bottom bit distinguishes the two: if it is set, the ASID maps
	 * only userspace. Pre-set the odd bits (0xaa == 0b10101010 per
	 * byte) so that the allocator only ever hands out even ASIDs,
	 * keeping each one's odd sibling reserved for userspace.
	 */
	memset(map, 0xaa, len);
}

static void set_reserved_asid_bits(void)
{
	if (pinned_asid_map)
		bitmap_copy(asid_map, pinned_asid_map, NUM_USER_ASIDS);
	else if (arm64_kernel_unmapped_at_el0())
		set_kpti_asid_bits(asid_map);
	else
		bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
}

#define asid_gen_match(asid) \
	(!(((asid) ^ atomic64_read(&asid_generation)) >> asid_bits))
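
/*
 * Worked example, assuming 16-bit ASIDs: asid_generation holds a multiple of
 * ASID_FIRST_VERSION, e.g. 0x30000. The XOR cancels matching generation bits,
 * so an id of 0x3abcd gives (0x3abcd ^ 0x30000) >> 16 == 0 -> match, whereas
 * a stale id of 0x2abcd gives (0x2abcd ^ 0x30000) >> 16 == 1 -> no match.
 */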

static void flush_context(void)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	set_reserved_asid_bits();

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid2idx(asid), asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on next
	 * context-switch
	 */
	cpumask_setall(&tlb_flush_pending);
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}

static u64 new_context(struct mm_struct *mm)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * If it is pinned, we can keep using it. Note that reserved
		 * takes priority: even if the ASID is also pinned, we still
		 * need to update the generation stored in reserved_asids.
		 */
		if (refcount_read(&mm->context.pinned))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(asid2idx(asid), asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes. We
	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
	 * pairs.
	 */
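	/*
	 * With KPTI enabled the odd bits are pre-set in asid_map, so the
	 * scan below only ever returns even indices. cur_idx remembers the
	 * last allocated index so that subsequent allocations do not rescan
	 * the densely used low end of the map.
	 */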
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid != NUM_USER_ASIDS)
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
						 &asid_generation);
	flush_context();

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

set_asid:
	__set_bit(asid, asid_map);
	cur_idx = asid;
	return idx2asid(asid) | generation;
}

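/*
 * Pick up @mm's ASID on this CPU, allocating a new one if its generation is
 * stale. The fast path updates active_asids with a relaxed cmpxchg; the slow
 * path takes cpu_asid_lock, calls new_context() and performs any local TLB
 * invalidation left pending by a rollover.
 */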
void check_and_switch_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int cpu;
	u64 asid, old_active_asid;

	if (system_supports_cnp())
		cpu_set_reserved_ttbr0();

	asid = atomic64_read(&mm->context.id);

	/*
	 * The memory ordering here is subtle.
	 * If our active_asids is non-zero and the ASID matches the current
	 * generation, then we update the active_asids entry with a relaxed
	 * cmpxchg. Racing with a concurrent rollover means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on the
	 *   lock. Taking the lock synchronises with the rollover and so
	 *   we are forced to see the updated generation.
	 *
	 * - We get a valid ASID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
	old_active_asid = atomic64_read(this_cpu_ptr(&active_asids));
	if (old_active_asid && asid_gen_match(asid) &&
	    atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_asids),
				     old_active_asid, asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if (!asid_gen_match(asid)) {
		asid = new_context(mm);
		atomic64_set(&mm->context.id, asid);
	}

	cpu = smp_processor_id();
	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();

	atomic64_set(this_cpu_ptr(&active_asids), asid);
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:

	arm64_apply_bp_hardening();

	/*
	 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
	 * emulating PAN.
	 */
	if (!system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}

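/*
 * Pin @mm's ASID so that it survives rollovers: pinned ASIDs are copied back
 * into asid_map by set_reserved_asid_bits() on every rollover, so the value
 * returned here stays valid until arm64_mm_context_put(). Returns 0 if
 * pinning is unavailable or the maximum number of pinned ASIDs is reached.
 */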
unsigned long arm64_mm_context_get(struct mm_struct *mm)
{
	unsigned long flags;
	u64 asid;

	if (!pinned_asid_map)
		return 0;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);

	asid = atomic64_read(&mm->context.id);

	if (refcount_inc_not_zero(&mm->context.pinned))
		goto out_unlock;

	if (nr_pinned_asids >= max_pinned_asids) {
		asid = 0;
		goto out_unlock;
	}

	if (!asid_gen_match(asid)) {
		/*
		 * We went through one or more rollovers since that ASID was
		 * last used. Ensure that it is still valid, or generate a
		 * new one.
		 */
		asid = new_context(mm);
		atomic64_set(&mm->context.id, asid);
	}

	nr_pinned_asids++;
	__set_bit(asid2idx(asid), pinned_asid_map);
	refcount_set(&mm->context.pinned, 1);

out_unlock:
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

	asid &= ~ASID_MASK;

	/* Set the equivalent of USER_ASID_BIT */
	if (asid && arm64_kernel_unmapped_at_el0())
		asid |= 1;

	return asid;
}
EXPORT_SYMBOL_GPL(arm64_mm_context_get);

void arm64_mm_context_put(struct mm_struct *mm)
{
	unsigned long flags;
	u64 asid = atomic64_read(&mm->context.id);

	if (!pinned_asid_map)
		return;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);

	if (refcount_dec_and_test(&mm->context.pinned)) {
		__clear_bit(asid2idx(asid), pinned_asid_map);
		nr_pinned_asids--;
	}

	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
EXPORT_SYMBOL_GPL(arm64_mm_context_put);

/* Errata workaround post TTBRx_EL1 update. */
asmlinkage void post_ttbr_update_workaround(void)
{
	if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456))
		return;

	asm(ALTERNATIVE("nop; nop; nop",
			"ic iallu; dsb nsh; isb",
			ARM64_WORKAROUND_CAVIUM_27456));
}

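/*
 * Program the new translation: TTBR0_EL1 points at @pgd_phys, while the ASID
 * is installed in TTBR1_EL1, which is the one in use because TCR_EL1.A1 is
 * set (see the comments below for the CNP and SW PAN special cases).
 */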
void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm)
{
	unsigned long ttbr1 = read_sysreg(ttbr1_el1);
	unsigned long asid = ASID(mm);
	unsigned long ttbr0 = phys_to_ttbr(pgd_phys);

	/* Skip CNP for the reserved ASID */
	if (system_supports_cnp() && asid)
		ttbr0 |= TTBR_CNP_BIT;

	/* SW PAN needs a copy of the ASID in TTBR0 for entry */
	if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN))
		ttbr0 |= FIELD_PREP(TTBR_ASID_MASK, asid);

	/* Set ASID in TTBR1 since TCR.A1 is set */
	ttbr1 &= ~TTBR_ASID_MASK;
	ttbr1 |= FIELD_PREP(TTBR_ASID_MASK, asid);

	write_sysreg(ttbr1, ttbr1_el1);
	isb();
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	post_ttbr_update_workaround();
}

static int asids_update_limit(void)
{
	unsigned long num_available_asids = NUM_USER_ASIDS;

	if (arm64_kernel_unmapped_at_el0()) {
		num_available_asids /= 2;
		if (pinned_asid_map)
			set_kpti_asid_bits(pinned_asid_map);
	}
	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
	 */
	WARN_ON(num_available_asids - 1 <= num_possible_cpus());
	pr_info("ASID allocator initialised with %lu entries\n",
		num_available_asids);

	/*
	 * There must always be an ASID available after rollover. Ensure that,
	 * even if all CPUs have a reserved ASID and the maximum number of ASIDs
	 * are pinned, there still is at least one empty slot in the ASID map.
	 */
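	/*
	 * For example, with 16-bit ASIDs, KPTI enabled and 8 possible CPUs
	 * this leaves 65536/2 - 8 - 2 = 32758 pinnable ASIDs.
	 */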
	max_pinned_asids = num_available_asids - num_possible_cpus() - 2;
	return 0;
}
arch_initcall(asids_update_limit);

static int asids_init(void)
{
	asid_bits = get_cpu_asid_bits();
	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
	asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
			   GFP_KERNEL);
	if (!asid_map)
		panic("Failed to allocate bitmap for %lu ASIDs\n",
		      NUM_USER_ASIDS);

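	/*
	 * Unlike asid_map, failure to allocate pinned_asid_map is not fatal:
	 * arm64_mm_context_get()/put() simply bail out when it is NULL and
	 * ASID pinning becomes unavailable.
	 */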
	pinned_asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS),
				  sizeof(*pinned_asid_map), GFP_KERNEL);
	nr_pinned_asids = 0;

	/*
	 * We cannot call set_reserved_asid_bits() here because CPU
	 * caps are not finalized yet, so it is safer to assume KPTI
	 * and reserve the kernel ASIDs from the beginning.
	 */
	if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
		set_kpti_asid_bits(asid_map);
	return 0;
}
early_initcall(asids_init);