// SPDX-License-Identifier: GPL-2.0
#include <linux/atomic.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

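/*
 * Allocator state shared by all CPUs: cpu_mmid_lock guards the slow
 * allocation path, mmid_version holds the current allocation generation,
 * num_mmids is the size of the MMID space and mmid_map tracks which MMIDs
 * are in use within the current generation.
 */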
static DEFINE_RAW_SPINLOCK(cpu_mmid_lock);

static atomic64_t mmid_version;
static unsigned int num_mmids;
static unsigned long *mmid_map;

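/*
 * reserved_mmids records, for each CPU, the MMID that was live on that CPU
 * at the last generation rollover; tlb_flush_pending marks CPUs that still
 * need to flush their TLB before running a task in the new generation.
 */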
static DEFINE_PER_CPU(u64, reserved_mmids);
static cpumask_t tlb_flush_pending;

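/* True if ASIDs/MMIDs @a and @b belong to the same version (generation). */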
static bool asid_versions_eq(int cpu, u64 a, u64 b)
{
	return ((a ^ b) & asid_version_mask(cpu)) == 0;
}

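/*
 * Allocate a new ASID for @mm on the local CPU. When the ASID counter
 * wraps we start a new ASID cycle, flushing the TLB and, if the icache is
 * virtually tagged, the icache too.
 */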
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu;
	u64 asid;

	/*
	 * This function is specific to ASIDs, and should not be called when
	 * MMIDs are in use.
	 */
	if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
		return;

	cpu = smp_processor_id();
	asid = asid_cache(cpu);

	if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
		if (cpu_has_vtag_icache)
			flush_icache_all();
		local_flush_tlb_all();	/* start new asid cycle */
	}

	set_cpu_context(cpu, mm, asid);
	asid_cache(cpu) = asid;
}
EXPORT_SYMBOL_GPL(get_new_mmu_context);

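/*
 * Ensure @mm's ASID on this CPU is from the current ASID version,
 * allocating a new one if a rollover has made it stale.
 */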
void check_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * This function is specific to ASIDs, and should not be called when
	 * MMIDs are in use.
	 */
	if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
		return;

	/* Check if our ASID is of an older version and thus invalid */
	if (!asid_versions_eq(cpu, cpu_context(cpu, mm), asid_cache(cpu)))
		get_new_mmu_context(mm);
}
EXPORT_SYMBOL_GPL(check_mmu_context);

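/*
 * Handle an MMID generation rollover: rebuild mmid_map from the MMIDs
 * still live on each CPU (preserving them in reserved_mmids so their
 * owners may keep running), then mark every CPU as needing a TLB flush
 * on its next context switch. Called with cpu_mmid_lock held.
 */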
static void flush_context(void)
{
	u64 mmid;
	int cpu;

	/* Update the list of reserved MMIDs and the MMID bitmap */
	bitmap_clear(mmid_map, 0, num_mmids);

	/* Reserve an MMID for kmap/wired entries */
	__set_bit(MMID_KERNEL_WIRED, mmid_map);

	for_each_possible_cpu(cpu) {
		mmid = xchg_relaxed(&cpu_data[cpu].asid_cache, 0);

		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * MMID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (mmid == 0)
			mmid = per_cpu(reserved_mmids, cpu);

		__set_bit(mmid & cpu_asid_mask(&cpu_data[cpu]), mmid_map);
		per_cpu(reserved_mmids, cpu) = mmid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on next
	 * context-switch
	 */
	cpumask_setall(&tlb_flush_pending);
}

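/*
 * Check whether @mmid is reserved by any CPU and, if so, rewrite every
 * reservation to @newmmid so the caller can keep the same MMID in the
 * new generation.
 */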
static bool check_update_reserved_mmid(u64 mmid, u64 newmmid)
{
	bool hit;
	int cpu;

	/*
	 * Iterate over the set of reserved MMIDs looking for a match.
	 * If we find one, then we can update our mm to use newmmid
	 * (i.e. the same MMID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old MMID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved MMID in a future
	 * generation.
	 */
	hit = false;
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_mmids, cpu) == mmid) {
			hit = true;
			per_cpu(reserved_mmids, cpu) = newmmid;
		}
	}

	return hit;
}

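/*
 * Allocate an MMID for @mm in the current generation, re-using its old
 * MMID where possible. If the MMID space is exhausted, bump the global
 * version and roll over via flush_context(), after which allocation is
 * guaranteed to succeed. Called with cpu_mmid_lock held.
 */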
static u64 get_new_mmid(struct mm_struct *mm)
{
	static u32 cur_idx = MMID_KERNEL_WIRED + 1;
	u64 mmid, version, mmid_mask;

	mmid = cpu_context(0, mm);
	version = atomic64_read(&mmid_version);
	mmid_mask = cpu_asid_mask(&boot_cpu_data);

	if (!asid_versions_eq(0, mmid, 0)) {
		u64 newmmid = version | (mmid & mmid_mask);

		/*
		 * If our current MMID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_mmid(mmid, newmmid)) {
			mmid = newmmid;
			goto set_context;
		}

		/*
		 * We had a valid MMID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(mmid & mmid_mask, mmid_map)) {
			mmid = newmmid;
			goto set_context;
		}
	}

	/* Allocate a free MMID */
	mmid = find_next_zero_bit(mmid_map, num_mmids, cur_idx);
	if (mmid != num_mmids)
		goto reserve_mmid;

	/* We're out of MMIDs, so increment the global version */
	version = atomic64_add_return_relaxed(asid_first_version(0),
					      &mmid_version);

	/* Note currently active MMIDs & mark TLBs as requiring flushes */
	flush_context();

	/* We have more MMIDs than CPUs, so this will always succeed */
	mmid = find_first_zero_bit(mmid_map, num_mmids);

reserve_mmid:
	__set_bit(mmid, mmid_map);
	cur_idx = mmid;
	mmid |= version;
set_context:
	set_cpu_context(0, mm, mmid);
	return mmid;
}

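/*
 * Switch the local CPU to @mm's context: validate or allocate its
 * ASID/MMID, perform any TLB invalidation deferred by a rollover (or
 * pending on an FTLB-sharing sibling), then install the MMID and the
 * page table base for the TLB miss handler.
 */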
void check_switch_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	u64 ctx, old_active_mmid;
	unsigned long flags;

	if (!cpu_has_mmid) {
		check_mmu_context(mm);
		write_c0_entryhi(cpu_asid(cpu, mm));
		goto setup_pgd;
	}

	/*
	 * MMID switch fast-path, to avoid acquiring cpu_mmid_lock when it's
	 * unnecessary.
	 *
	 * The memory ordering here is subtle. If our active_mmids is non-zero
	 * and the MMID matches the current version, then we update the CPU's
	 * asid_cache with a relaxed cmpxchg. Racing with a concurrent rollover
	 * means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on
	 *   cpu_mmid_lock in check_switch_mmu_context(). Taking the lock
	 *   synchronises with the rollover and so we are forced to see the
	 *   updated generation.
	 *
	 * - We get a valid MMID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
	ctx = cpu_context(cpu, mm);
	old_active_mmid = READ_ONCE(cpu_data[cpu].asid_cache);
	if (!old_active_mmid ||
	    !asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)) ||
	    !cmpxchg_relaxed(&cpu_data[cpu].asid_cache, old_active_mmid, ctx)) {
		raw_spin_lock_irqsave(&cpu_mmid_lock, flags);

		ctx = cpu_context(cpu, mm);
		if (!asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)))
			ctx = get_new_mmid(mm);

		WRITE_ONCE(cpu_data[cpu].asid_cache, ctx);
		raw_spin_unlock_irqrestore(&cpu_mmid_lock, flags);
	}

	/*
	 * Invalidate the local TLB if needed. Note that we must only clear our
	 * bit in tlb_flush_pending after this is complete, so that the
	 * cpu_has_shared_ftlb_entries case below isn't misled.
	 */
	if (cpumask_test_cpu(cpu, &tlb_flush_pending)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();
		local_flush_tlb_all();
		cpumask_clear_cpu(cpu, &tlb_flush_pending);
	}

	write_c0_memorymapid(ctx & cpu_asid_mask(&boot_cpu_data));

	/*
	 * If this CPU shares FTLB entries with its siblings and one or more of
	 * those siblings hasn't yet invalidated its TLB following a version
	 * increase then we need to invalidate any TLB entries for our MMID
	 * that we might otherwise pick up from a sibling.
	 *
	 * We ifdef on CONFIG_SMP because cpu_sibling_map isn't defined in
	 * CONFIG_SMP=n kernels.
	 */
#ifdef CONFIG_SMP
	if (cpu_has_shared_ftlb_entries &&
	    cpumask_intersects(&tlb_flush_pending, &cpu_sibling_map[cpu])) {
		/* Ensure we operate on the new MMID */
		mtc0_tlbw_hazard();

		/*
		 * Invalidate all TLB entries associated with the new
		 * MMID, and wait for the invalidation to complete.
		 */
		ginvt_mmid();
		sync_ginv();
	}
#endif

setup_pgd:
	TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
}
EXPORT_SYMBOL_GPL(check_switch_mmu_context);

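/*
 * Boot-time setup for the MMID allocator: size the MMID space from the
 * hardware's MMID width, allocate the bitmap and reserve the MMID used
 * for kmap/wired entries.
 */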
static int mmid_init(void)
{
	if (!cpu_has_mmid)
		return 0;

	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more MMID than CPUs.
	 */
	num_mmids = asid_first_version(0);
	WARN_ON(num_mmids <= num_possible_cpus());

	atomic64_set(&mmid_version, asid_first_version(0));
	mmid_map = kcalloc(BITS_TO_LONGS(num_mmids), sizeof(*mmid_map),
			   GFP_KERNEL);
	if (!mmid_map)
		panic("Failed to allocate bitmap for %u MMIDs\n", num_mmids);

	/* Reserve an MMID for kmap/wired entries */
	__set_bit(MMID_KERNEL_WIRED, mmid_map);

	pr_info("MMID allocator initialised with %u entries\n", num_mmids);
	return 0;
}
early_initcall(mmid_init);