/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ASM_ASID_H
#define __ASM_ASM_ASID_H

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

struct asid_info
{
	/* Current allocation generation, stored above the ASID bits */
	atomic64_t		generation;
	/* Bitmap tracking which ASIDs are in use */
	unsigned long		*map;
	/* Per-CPU ASID currently loaded in hardware */
	atomic64_t __percpu	*active;
	/* Per-CPU ASID preserved across a rollover */
	u64 __percpu		*reserved;
	/* Number of bits in a hardware ASID */
	u32			bits;
	/* Lock protecting the structure */
	raw_spinlock_t		lock;
	/* Which CPUs require a context flush on the next call */
	cpumask_t		flush_pending;
	/* Number of ASIDs allocated per context (shift value) */
	unsigned int		ctxt_shift;
	/* Callback to locally flush the context. */
	void			(*flush_cpu_ctxt_cb)(void);
};

#define NUM_ASIDS(info)		(1UL << ((info)->bits))
#define NUM_CTXT_ASIDS(info)	(NUM_ASIDS(info) >> (info)->ctxt_shift)
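
/*
 * For example (illustrative values only): with bits = 16 and ctxt_shift = 1,
 * NUM_ASIDS(info) is 65536 hardware ASIDs and NUM_CTXT_ASIDS(info) is 32768
 * contexts, since each context consumes 1 << ctxt_shift = 2 hardware ASIDs.
 */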

#define active_asid(info, cpu)	*per_cpu_ptr((info)->active, cpu)

/* Slow path: take the allocator lock and assign a new ASID to the context. */
void asid_new_context(struct asid_info *info, atomic64_t *pasid,
		      unsigned int cpu, struct mm_struct *mm);

/*
 * Check that the ASID is still valid for the context. If not, generate
 * a new ASID.
 *
 * @info: Pointer to the ASID allocator
 * @pasid: Pointer to the current ASID batch
 * @cpu: current CPU ID. Must have been acquired through get_cpu()
 * @mm: The mm_struct the context belongs to
 */
static inline void asid_check_context(struct asid_info *info,
				      atomic64_t *pasid, unsigned int cpu,
				      struct mm_struct *mm)
{
	u64 asid, old_active_asid;

	asid = atomic64_read(pasid);

	/*
	 * The memory ordering here is subtle.
	 * If our active_asid is non-zero and the ASID matches the current
	 * generation, then we update the active_asid entry with a relaxed
	 * cmpxchg. Racing with a concurrent rollover means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on the
	 *   lock. Taking the lock synchronises with the rollover and so
	 *   we are forced to see the updated generation.
	 *
	 * - We get a valid ASID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
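	/*
	 * The generation lives in the bits above the hardware ASID, so the
	 * "(asid ^ generation) >> bits" test below is zero only when @pasid
	 * was allocated in the current generation.
	 */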
	old_active_asid = atomic64_read(&active_asid(info, cpu));
	if (old_active_asid &&
	    !((asid ^ atomic64_read(&info->generation)) >> info->bits) &&
	    atomic64_cmpxchg_relaxed(&active_asid(info, cpu),
				     old_active_asid, asid))
		return;

	asid_new_context(info, pasid, cpu, mm);
}

/*
 * Initialise the ASID allocator.
 *
 * @info: Pointer to the ASID allocator structure
 * @bits: Number of bits in a hardware ASID
 * @asid_per_ctxt: Number of ASIDs to allocate per context; should be a
 *                 power of two
 * @flush_cpu_ctxt_cb: Callback used to locally flush the context when a
 *                     flush is pending on the CPU
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int asid_allocator_init(struct asid_info *info,
			u32 bits, unsigned int asid_per_ctxt,
			void (*flush_cpu_ctxt_cb)(void));

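/*
 * Example usage. This is an illustrative sketch only: the asid_info instance,
 * the flush callback body and the mm->context.id field are assumptions about
 * the caller and are not defined by this header.
 *
 * At boot (here with 16-bit ASIDs and one ASID per context):
 *
 *	static struct asid_info asid_info;
 *
 *	static void flush_local_context(void)
 *	{
 *		local_flush_tlb_all();
 *	}
 *
 *	if (asid_allocator_init(&asid_info, 16, 1, flush_local_context))
 *		panic("Failed to initialise the ASID allocator");
 *
 * On a context switch, with preemption disabled (e.g. between get_cpu() and
 * put_cpu()):
 *
 *	asid_check_context(&asid_info, &mm->context.id, cpu, mm);
 */
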
#endif /* __ASM_ASM_ASID_H */