// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>

/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                         7          0
 * +-------------------------+-----------+
 * |      process ID         |   ASID    |
 * +-------------------------+-----------+
 * |              context ID             |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 *
 * In big endian operation, the two 32 bit words are swapped if accessed
 * by non-64-bit operations.
 */
#define ASID_FIRST_VERSION      (1ULL << ASID_BITS)
#define NUM_USER_ASIDS          ASID_FIRST_VERSION
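
/*
 * Worked example of the 64-bit context.id encoding used below, assuming
 * ASID_BITS == 8 (its value on 32-bit ARM): the low 8 bits hold the
 * hardware ASID and everything above them holds the generation
 * ("version"), so ASID_FIRST_VERSION == 0x100 and NUM_USER_ASIDS == 256.
 * A context.id of 0x305 thus means generation 3, hardware ASID 5; each
 * rollover adds ASID_FIRST_VERSION to the generation counter, leaving
 * the ASID bits untouched.
 */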

static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;
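
/*
 * Roles of the per-CPU state above: active_asids is the context.id a CPU
 * is currently running with (or 0 if a rollover has snapshotted it);
 * reserved_asids preserves the last known context.id across a rollover so
 * the task still running there can keep it; tlb_flush_pending marks CPUs
 * that must flush their TLB before running a task from the new generation.
 */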

#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
                             cpumask_t *mask)
{
        int cpu;
        unsigned long flags;
        u64 context_id, asid;

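        /*
         * Take cpu_asid_lock so the active/reserved ASIDs cannot change
         * under us (e.g. during a concurrent rollover) while we build
         * the mask of CPUs that may hold stale TLB entries for this mm.
         */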
        raw_spin_lock_irqsave(&cpu_asid_lock, flags);
        context_id = mm->context.id.counter;
        for_each_online_cpu(cpu) {
                if (cpu == this_cpu)
                        continue;
                /*
                 * We only need to send an IPI if the other CPUs are
                 * running the same ASID as the one being invalidated.
                 */
                asid = per_cpu(active_asids, cpu).counter;
                if (asid == 0)
                        asid = per_cpu(reserved_asids, cpu);
                if (context_id == asid)
                        cpumask_set_cpu(cpu, mask);
        }
        raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
#endif

#ifdef CONFIG_ARM_LPAE
/*
 * With LPAE, the ASID and page tables are updated atomically, so there is
 * no need for a reserved set of tables (the active ASID tracking prevents
 * any issues across a rollover).
 */
#define cpu_set_reserved_ttbr0()
#else
static void cpu_set_reserved_ttbr0(void)
{
        u32 ttb;
        /*
         * Copy TTBR1 into TTBR0.
         * This points at swapper_pg_dir, which contains only global
         * entries so any speculative walks are perfectly safe.
         */
        asm volatile(
        "       mrc     p15, 0, %0, c2, c0, 1           @ read TTBR1\n"
        "       mcr     p15, 0, %0, c2, c0, 0           @ set TTBR0\n"
        : "=r" (ttb));
        isb();
}
#endif

#ifdef CONFIG_PID_IN_CONTEXTIDR
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
                               void *t)
{
        u32 contextidr;
        pid_t pid;
        struct thread_info *thread = t;

        if (cmd != THREAD_NOTIFY_SWITCH)
                return NOTIFY_DONE;

        pid = task_pid_nr(thread->task) << ASID_BITS;
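        /*
         * Read-modify-write CONTEXTIDR: keep the low ASID_BITS (the ASID)
         * and replace the PROCID field above them with the switching
         * task's PID, so debuggers and trace hardware can identify it.
         */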
        asm volatile(
        "       mrc     p15, 0, %0, c13, c0, 1\n"
        "       and     %0, %0, %2\n"
        "       orr     %0, %0, %1\n"
        "       mcr     p15, 0, %0, c13, c0, 1\n"
        : "=r" (contextidr), "+r" (pid)
        : "I" (~ASID_MASK));
        isb();

        return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
        .notifier_call = contextidr_notifier,
};

static int __init contextidr_notifier_init(void)
{
        return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif

static void flush_context(unsigned int cpu)
{
        int i;
        u64 asid;

        /* Update the list of reserved ASIDs and the ASID bitmap. */
        bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
        for_each_possible_cpu(i) {
                asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
                /*
                 * If this CPU has already been through a
                 * rollover, but hasn't run another task in
                 * the meantime, we must preserve its reserved
                 * ASID, as this is the only trace we have of
                 * the process it is still running.
                 */
                if (asid == 0)
                        asid = per_cpu(reserved_asids, i);
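                /* Strip the generation; only the ASID indexes the map. */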
                __set_bit(asid & ~ASID_MASK, asid_map);
                per_cpu(reserved_asids, i) = asid;
        }

        /* Queue a TLB invalidate and flush the I-cache if necessary. */
        cpumask_setall(&tlb_flush_pending);

        if (icache_is_vivt_asid_tagged())
                __flush_icache_all();
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
        int cpu;
        bool hit = false;

        /*
         * Iterate over the set of reserved ASIDs looking for a match.
         * If we find one, then we can update our mm to use newasid
         * (i.e. the same ASID in the current generation) but we can't
         * exit the loop early, since we need to ensure that all copies
         * of the old ASID are updated to reflect the mm. Failure to do
         * so could result in us missing the reserved ASID in a future
         * generation.
         */
        for_each_possible_cpu(cpu) {
                if (per_cpu(reserved_asids, cpu) == asid) {
                        hit = true;
                        per_cpu(reserved_asids, cpu) = newasid;
                }
        }

        return hit;
}

static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
        static u32 cur_idx = 1;
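        /*
         * cur_idx remembers where the previous search of asid_map ended,
         * so repeated allocations walk the bitmap incrementally instead
         * of rescanning from the start on every call.
         */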
        u64 asid = atomic64_read(&mm->context.id);
        u64 generation = atomic64_read(&asid_generation);

        if (asid != 0) {
                u64 newasid = generation | (asid & ~ASID_MASK);

                /*
                 * If our current ASID was active during a rollover, we
                 * can continue to use it and this was just a false alarm.
                 */
                if (check_update_reserved_asid(asid, newasid))
                        return newasid;

                /*
                 * We had a valid ASID in a previous life, so try to re-use
                 * it if possible.
                 */
                asid &= ~ASID_MASK;
                if (!__test_and_set_bit(asid, asid_map))
                        return newasid;
        }

        /*
         * Allocate a free ASID. If we can't find one, take a note of the
         * currently active ASIDs and mark the TLBs as requiring flushes.
         * We always count from ASID #1, as we reserve ASID #0 to switch
         * via TTBR0 and to avoid speculative page table walks from hitting
         * in any partial walk caches, which could be populated from
         * overlapping level-1 descriptors used to map both the module
         * area and the userspace stack.
         */
        asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
        if (asid == NUM_USER_ASIDS) {
                generation = atomic64_add_return(ASID_FIRST_VERSION,
                                                 &asid_generation);
                flush_context(cpu);
                asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
        }

        __set_bit(asid, asid_map);
        cur_idx = asid;
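        /*
         * The mm has been given a brand-new ASID, so TLB entries tagged
         * with the old one are no longer of interest; reset mm_cpumask()
         * and let check_and_switch_context() repopulate it.
         */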
        cpumask_clear(mm_cpumask(mm));
        return asid | generation;
}

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
        unsigned long flags;
        unsigned int cpu = smp_processor_id();
        u64 asid;

        if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
                __check_vmalloc_seq(mm);

        /*
         * We cannot update the pgd and the ASID atomically with classic
         * MMU, so switch exclusively to global mappings to avoid
         * speculative page table walking with the wrong TTBR.
         */
        cpu_set_reserved_ttbr0();

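        /*
         * Fastpath: if this mm's ASID belongs to the current generation
         * and this CPU's active_asids slot is non-zero (i.e. no rollover
         * has snapshotted it in the meantime), the atomic64_xchg()
         * republishes the ASID as active and we can skip cpu_asid_lock.
         */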
        asid = atomic64_read(&mm->context.id);
        if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
            && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
                goto switch_mm_fastpath;

        raw_spin_lock_irqsave(&cpu_asid_lock, flags);
        /* Check that our ASID belongs to the current generation. */
        asid = atomic64_read(&mm->context.id);
        if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
                asid = new_context(mm, cpu);
                atomic64_set(&mm->context.id, asid);
        }

        if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
                local_flush_bp_all();
                local_flush_tlb_all();
        }

        atomic64_set(&per_cpu(active_asids, cpu), asid);
        cpumask_set_cpu(cpu, mm_cpumask(mm));
        raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
        cpu_switch_mm(mm->pgd, mm);
}