^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * TLB support routines.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * David Mosberger-Tang <davidm@hpl.hp.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * 08/02/00 A. Mallick <asit.k.mallick@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Modified RID allocation for SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * Goutham Rao <goutham.rao@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * IPI based ptc implementation and A-step IPI implementation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * Rohit Seth <rohit.seth@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * Ken Chen <kenneth.w.chen@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * Christophe de Dinechin <ddd@hp.com>: Avoid ptc.e on memory allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * Copyright (C) 2007 Intel Corp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * Fenghua Yu <fenghua.yu@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) * Add multiple ptc.g/ptc.ga instruction support in global tlb purge.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/memblock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <asm/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <asm/mmu_context.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <asm/pal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <asm/tlbflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <asm/dma.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <asm/processor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <asm/sal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <asm/tlb.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) static struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) u64 mask; /* mask of supported purge page-sizes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) unsigned long max_bits; /* log2 of largest supported purge page-size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) } purge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) struct ia64_ctx ia64_ctx = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) .lock = __SPIN_LOCK_UNLOCKED(ia64_ctx.lock),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) .next = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) .max_ctx = ~0U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) DEFINE_PER_CPU(u8, ia64_tr_num); /*Number of TR slots in current processor*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) DEFINE_PER_CPU(u8, ia64_tr_used); /*Max Slot number used by kernel*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
/*
 * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
 * Called after cpu_init() has set up ia64_ctx.max_ctx based on the
 * maximum RID supported by the boot CPU.
 */
void __init
mmu_context_init (void)
{
	ia64_ctx.bitmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
					 SMP_CACHE_BYTES);
	if (!ia64_ctx.bitmap)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      (ia64_ctx.max_ctx + 1) >> 3);
	ia64_ctx.flushmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
					   SMP_CACHE_BYTES);
	if (!ia64_ctx.flushmap)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      (ia64_ctx.max_ctx + 1) >> 3);
}
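
/*
 * Sizing sketch for the allocations above (illustrative numbers, not
 * taken from any particular CPU): each bitmap needs one bit per
 * context, i.e. max_ctx + 1 bits, and the ">> 3" converts bits to
 * bytes.  For a hypothetical 18-bit RID space, max_ctx would be
 * (1 << 18) - 1 = 0x3ffff, so each bitmap would occupy
 * (0x3ffff + 1) >> 3 = 32 KiB.
 */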

/*
 * Acquire the ia64_ctx.lock before calling this function!
 */
void
wrap_mmu_context (struct mm_struct *mm)
{
	int i, cpu;
	unsigned long flush_bit;

	for (i = 0; i <= ia64_ctx.max_ctx / BITS_PER_LONG; i++) {
		flush_bit = xchg(&ia64_ctx.flushmap[i], 0);
		ia64_ctx.bitmap[i] ^= flush_bit;
	}

	/* use offset at 300 to skip daemons */
	ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
				ia64_ctx.max_ctx, 300);
	ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
				ia64_ctx.max_ctx, ia64_ctx.next);

	/*
	 * can't call flush_tlb_all() here because of race condition
	 * with O(1) scheduler [EF]
	 */
	cpu = get_cpu(); /* prevent preemption/migration */
	for_each_online_cpu(i)
		if (i != cpu)
			per_cpu(ia64_need_tlb_flush, i) = 1;
	put_cpu();
	local_flush_tlb_all();
}
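
/*
 * Caller sketch (a hedged illustration; the actual call site is
 * get_mmu_context() in asm/mmu_context.h): wrap_mmu_context() must
 * only run under ia64_ctx.lock, roughly along these lines:
 *
 *	spin_lock(&ia64_ctx.lock);
 *	if (ia64_ctx.next >= ia64_ctx.limit)
 *		wrap_mmu_context(mm);	-- recycle stale context numbers
 *	spin_unlock(&ia64_ctx.lock);
 */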

/*
 * Implement "spinaphores" ... like counting semaphores, but they
 * spin instead of sleeping.  If there are ever any other users for
 * this primitive it can be moved up to a spinaphore.h header.
 */
struct spinaphore {
	unsigned long ticket;
	unsigned long serve;
};

static inline void spinaphore_init(struct spinaphore *ss, int val)
{
	ss->ticket = 0;
	ss->serve = val;
}

static inline void down_spin(struct spinaphore *ss)
{
	unsigned long t = ia64_fetchadd(1, &ss->ticket, acq), serve;

	if (time_before(t, ss->serve))
		return;

	ia64_invala();

	for (;;) {
		asm volatile ("ld8.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory");
		if (time_before(t, serve))
			return;
		cpu_relax();
	}
}

static inline void up_spin(struct spinaphore *ss)
{
	ia64_fetchadd(1, &ss->serve, rel);
}
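
/*
 * Usage sketch (illustrative only; the one real user is the ptc.g
 * path below): a spinaphore initialized to N admits at most N
 * concurrent holders, busy-waiting instead of sleeping:
 *
 *	static struct spinaphore sem;
 *
 *	spinaphore_init(&sem, 2);	-- serve two tickets at a time
 *	down_spin(&sem);		-- spin until our ticket is served
 *	-- at most two CPUs execute this section concurrently --
 *	up_spin(&sem);			-- admit the next waiter
 */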

static struct spinaphore ptcg_sem;
static u16 nptcg = 1;
static int need_ptcg_sem = 1;
static int toolatetochangeptcgsem = 0;

/*
 * Kernel parameter "nptcg=" overrides the maximum number of concurrent
 * global TLB purges, which is otherwise reported by either PAL or the
 * SAL PALO table.
 *
 * There is no sanity checking of the nptcg value; it is the user's
 * responsibility to supply a value that is valid for the platform.
 * Otherwise, the kernel may hang in some cases.
 */
static int __init
set_nptcg(char *str)
{
	int value = 0;

	get_option(&str, &value);
	setup_ptcg_sem(value, NPTCG_FROM_KERNEL_PARAMETER);

	return 1;
}

__setup("nptcg=", set_nptcg);
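
/*
 * Example (hypothetical command line): appending "nptcg=4" to the boot
 * parameters caps concurrent global purges at four, overriding
 * whatever PAL or the PALO table report:
 *
 *	root=/dev/sda1 ... nptcg=4
 */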

/*
 * The maximum number of simultaneous ptc.g purges in the system can
 * be defined by PAL_VM_SUMMARY (in which case we should take
 * the smallest value for any cpu in the system) or by the PAL
 * override table (in which case we should ignore the value from
 * PAL_VM_SUMMARY).
 *
 * The kernel parameter "nptcg=" overrides the maximum number of
 * simultaneous ptc.g purges defined in either PAL_VM_SUMMARY or the
 * PAL override table.  In that case, we should ignore the value from
 * both sources.
 *
 * Complicating the logic here is the fact that num_possible_cpus()
 * isn't fully set up until we start bringing cpus online.
 */
void
setup_ptcg_sem(int max_purges, int nptcg_from)
{
	static int kp_override;
	static int palo_override;
	static int firstcpu = 1;

	if (toolatetochangeptcgsem) {
		if (nptcg_from == NPTCG_FROM_PAL && max_purges == 0)
			BUG_ON(1 < nptcg);
		else
			BUG_ON(max_purges < nptcg);
		return;
	}

	if (nptcg_from == NPTCG_FROM_KERNEL_PARAMETER) {
		kp_override = 1;
		nptcg = max_purges;
		goto resetsema;
	}
	if (kp_override) {
		need_ptcg_sem = num_possible_cpus() > nptcg;
		return;
	}

	if (nptcg_from == NPTCG_FROM_PALO) {
		palo_override = 1;

		/* In PALO max_purges == 0 really means it! */
		if (max_purges == 0)
			panic("Whoa! Platform does not support global TLB purges.\n");
		nptcg = max_purges;
		if (nptcg == PALO_MAX_TLB_PURGES) {
			need_ptcg_sem = 0;
			return;
		}
		goto resetsema;
	}
	if (palo_override) {
		if (nptcg != PALO_MAX_TLB_PURGES)
			need_ptcg_sem = (num_possible_cpus() > nptcg);
		return;
	}

	/* In PAL_VM_SUMMARY max_purges == 0 actually means 1 */
	if (max_purges == 0)
		max_purges = 1;

	if (firstcpu) {
		nptcg = max_purges;
		firstcpu = 0;
	}
	if (max_purges < nptcg)
		nptcg = max_purges;
	if (nptcg == PAL_MAX_PURGES) {
		need_ptcg_sem = 0;
		return;
	} else
		need_ptcg_sem = (num_possible_cpus() > nptcg);

resetsema:
	spinaphore_init(&ptcg_sem, max_purges);
}
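
/*
 * Worked example (hypothetical platform, no overrides): if
 * PAL_VM_SUMMARY reports max_purges == 2 on every CPU of an 8-CPU
 * machine, nptcg settles at 2, need_ptcg_sem stays 1 (8 > 2), and
 * ptcg_sem admits two concurrent ptc.g issuers.  With "nptcg=8" on
 * the command line instead, need_ptcg_sem drops to 0 once the
 * per-CPU PAL values are processed, and the spinaphore is never
 * taken.
 */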

#ifdef CONFIG_SMP
static void
ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start,
		       unsigned long end, unsigned long nbits)
{
	struct mm_struct *active_mm = current->active_mm;

	toolatetochangeptcgsem = 1;

	if (mm != active_mm) {
		/* Restore region IDs for mm */
		if (mm && active_mm) {
			activate_context(mm);
		} else {
			flush_tlb_all();
			return;
		}
	}

	if (need_ptcg_sem)
		down_spin(&ptcg_sem);

	do {
		/*
		 * Flush ALAT entries also.
		 */
		ia64_ptcga(start, (nbits << 2));
		ia64_srlz_i();
		start += (1UL << nbits);
	} while (start < end);

	if (need_ptcg_sem)
		up_spin(&ptcg_sem);

	if (mm != active_mm) {
		activate_context(active_mm);
	}
}
#endif /* CONFIG_SMP */

void
local_flush_tlb_all (void)
{
	unsigned long i, j, flags, count0, count1, stride0, stride1, addr;

	addr    = local_cpu_data->ptce_base;
	count0  = local_cpu_data->ptce_count[0];
	count1  = local_cpu_data->ptce_count[1];
	stride0 = local_cpu_data->ptce_stride[0];
	stride1 = local_cpu_data->ptce_stride[1];

	local_irq_save(flags);
	for (i = 0; i < count0; ++i) {
		for (j = 0; j < count1; ++j) {
			ia64_ptce(addr);
			addr += stride1;
		}
		addr += stride0;
	}
	local_irq_restore(flags);
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}

static void
__flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
		   unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long size = end - start;
	unsigned long nbits;

#ifndef CONFIG_SMP
	if (mm != current->active_mm) {
		mm->context = 0;
		return;
	}
#endif

	nbits = ia64_fls(size + 0xfff);
	while (unlikely (((1UL << nbits) & purge.mask) == 0) &&
			(nbits < purge.max_bits))
		++nbits;
	if (nbits > purge.max_bits)
		nbits = purge.max_bits;
	start &= ~((1UL << nbits) - 1);

	preempt_disable();
#ifdef CONFIG_SMP
	if (mm != current->active_mm || cpumask_weight(mm_cpumask(mm)) != 1) {
		ia64_global_tlb_purge(mm, start, end, nbits);
		preempt_enable();
		return;
	}
#endif
	do {
		ia64_ptcl(start, (nbits << 2));
		start += (1UL << nbits);
	} while (start < end);
	preempt_enable();
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}
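
/*
 * Page-size selection sketch (illustrative numbers): with the
 * architected purge mask 0x115557000, a 40 KiB range gives
 * ia64_fls(size + 0xfff) = 15, but bit 15 (32 KiB) is not set in the
 * mask, so the loop above bumps nbits to 16 (64 KiB) and start is
 * aligned down to a 64 KiB boundary before the ptc loop runs.
 */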

void flush_tlb_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	if (unlikely(end - start >= 1024*1024*1024*1024UL
			|| REGION_NUMBER(start) != REGION_NUMBER(end - 1))) {
		/*
		 * If we flush more than a terabyte or across regions, we're
		 * probably better off just flushing the entire TLB(s).  This
		 * should be very rare and is not worth optimizing for.
		 */
		flush_tlb_all();
	} else {
		/* flush the address range from the tlb */
		__flush_tlb_range(vma, start, end);
		/* flush the virt. page-table area mapping the addr range */
		__flush_tlb_range(vma, ia64_thash(start), ia64_thash(end));
	}
}
EXPORT_SYMBOL(flush_tlb_range);

void ia64_tlb_init(void)
{
	ia64_ptce_info_t ptce_info;
	u64 tr_pgbits;
	long status;
	pal_vm_info_1_u_t vm_info_1;
	pal_vm_info_2_u_t vm_info_2;
	int cpu = smp_processor_id();

	if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) {
		printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld; "
		       "defaulting to architected purge page-sizes.\n", status);
		purge.mask = 0x115557000UL;
	}
	purge.max_bits = ia64_fls(purge.mask);

	ia64_get_ptce(&ptce_info);
	local_cpu_data->ptce_base = ptce_info.base;
	local_cpu_data->ptce_count[0] = ptce_info.count[0];
	local_cpu_data->ptce_count[1] = ptce_info.count[1];
	local_cpu_data->ptce_stride[0] = ptce_info.stride[0];
	local_cpu_data->ptce_stride[1] = ptce_info.stride[1];

	local_flush_tlb_all();	/* nuke leftovers from bootstrapping... */
	status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2);

	if (status) {
		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
		per_cpu(ia64_tr_num, cpu) = 8;
		return;
	}
	per_cpu(ia64_tr_num, cpu) = vm_info_1.pal_vm_info_1_s.max_itr_entry + 1;
	if (per_cpu(ia64_tr_num, cpu) >
				(vm_info_1.pal_vm_info_1_s.max_dtr_entry + 1))
		per_cpu(ia64_tr_num, cpu) =
				vm_info_1.pal_vm_info_1_s.max_dtr_entry + 1;
	if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) {
		static int justonce = 1;
		per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX;
		if (justonce) {
			justonce = 0;
			printk(KERN_DEBUG "TR register number exceeds "
			       "IA64_TR_ALLOC_MAX!\n");
		}
	}
}

/*
 * is_tr_overlap
 *
 * Check overlap with inserted TRs.
 */
static int is_tr_overlap(struct ia64_tr_entry *p, u64 va, u64 log_size)
{
	u64 tr_log_size;
	u64 tr_end;
	u64 va_rr = ia64_get_rr(va);
	u64 va_rid = RR_TO_RID(va_rr);
	u64 va_end = va + (1 << log_size) - 1;

	if (va_rid != RR_TO_RID(p->rr))
		return 0;
	tr_log_size = (p->itir & 0xff) >> 2;
	tr_end = p->ifa + (1 << tr_log_size) - 1;

	if (va > tr_end || p->ifa > va_end)
		return 0;
	return 1;
}
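
/*
 * Overlap sketch (made-up numbers): for a tracked entry with
 * p->ifa = 0xa0000000 and tr_log_size = 16 (64 KiB, so tr_end =
 * 0xa000ffff), a request for va = 0xa0008000 with log_size = 14
 * (va_end = 0xa000bfff) in the same region ID overlaps and is
 * rejected; va = 0xa0010000 would not.
 */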

/*
 * ia64_insert_tr in virtual mode.  Allocate a TR slot.
 *
 * target_mask : 0x1 : itr, 0x2 : dtr, 0x3 : idtr
 *
 * va	: virtual address.
 * pte	: pte entry to be inserted.
 * log_size: range to be covered.
 *
 * Return value:  < 0 : error number
 *		 >= 0 : slot number allocated for the TR.
 *
 * Must be called with preemption disabled.
 */
int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
{
	int i, r;
	unsigned long psr;
	struct ia64_tr_entry *p;
	int cpu = smp_processor_id();

	if (!ia64_idtrs[cpu]) {
		ia64_idtrs[cpu] = kmalloc_array(2 * IA64_TR_ALLOC_MAX,
						sizeof(struct ia64_tr_entry),
						GFP_KERNEL);
		if (!ia64_idtrs[cpu])
			return -ENOMEM;
	}
	r = -EINVAL;
	/* Check overlap with existing TR entries */
	if (target_mask & 0x1) {
		p = ia64_idtrs[cpu];
		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
		     i++, p++) {
			if (p->pte & 0x1)
				if (is_tr_overlap(p, va, log_size)) {
					printk(KERN_DEBUG "Overlapped entry "
					       "inserted for TR register!!\n");
					goto out;
				}
		}
	}
	if (target_mask & 0x2) {
		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX;
		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
		     i++, p++) {
			if (p->pte & 0x1)
				if (is_tr_overlap(p, va, log_size)) {
					printk(KERN_DEBUG "Overlapped entry "
					       "inserted for TR register!!\n");
					goto out;
				}
		}
	}

	for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
		switch (target_mask & 0x3) {
		case 1:
			if (!((ia64_idtrs[cpu] + i)->pte & 0x1))
				goto found;
			continue;
		case 2:
			if (!((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
				goto found;
			continue;
		case 3:
			if (!((ia64_idtrs[cpu] + i)->pte & 0x1) &&
			    !((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
				goto found;
			continue;
		default:
			r = -EINVAL;
			goto out;
		}
	}
found:
	if (i >= per_cpu(ia64_tr_num, cpu))
		return -EBUSY;

	/* Record tr info for MCA handler use! */
	if (i > per_cpu(ia64_tr_used, cpu))
		per_cpu(ia64_tr_used, cpu) = i;

	psr = ia64_clear_ic();
	if (target_mask & 0x1) {
		ia64_itr(0x1, i, va, pte, log_size);
		ia64_srlz_i();
		p = ia64_idtrs[cpu] + i;
		p->ifa = va;
		p->pte = pte;
		p->itir = log_size << 2;
		p->rr = ia64_get_rr(va);
	}
	if (target_mask & 0x2) {
		ia64_itr(0x2, i, va, pte, log_size);
		ia64_srlz_i();
		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i;
		p->ifa = va;
		p->pte = pte;
		p->itir = log_size << 2;
		p->rr = ia64_get_rr(va);
	}
	ia64_set_psr(psr);
	r = i;
out:
	return r;
}
EXPORT_SYMBOL_GPL(ia64_itr_entry);
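
/*
 * Usage sketch (hypothetical values; callers must have preemption
 * disabled): pin a 16 MiB kernel region into both the instruction and
 * data TRs, then release the slot again:
 *
 *	int slot;
 *
 *	slot = ia64_itr_entry(0x3, va, pte, 24);  -- 2^24 = 16 MiB
 *	if (slot >= 0)
 *		ia64_ptr_entry(0x3, slot);        -- purge both TRs
 */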

/*
 * ia64_purge_tr
 *
 * target_mask: 0x1: purge itr, 0x2 : purge dtr, 0x3 purge idtr.
 * slot: slot number to be freed.
 *
 * Must be called with preemption disabled.
 */
void ia64_ptr_entry(u64 target_mask, int slot)
{
	int cpu = smp_processor_id();
	int i;
	struct ia64_tr_entry *p;

	if (slot < IA64_TR_ALLOC_BASE || slot >= per_cpu(ia64_tr_num, cpu))
		return;

	if (target_mask & 0x1) {
		p = ia64_idtrs[cpu] + slot;
		if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir >> 2)) {
			p->pte = 0;
			ia64_ptr(0x1, p->ifa, p->itir >> 2);
			ia64_srlz_i();
		}
	}

	if (target_mask & 0x2) {
		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + slot;
		if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir >> 2)) {
			p->pte = 0;
			ia64_ptr(0x2, p->ifa, p->itir >> 2);
			ia64_srlz_i();
		}
	}

	for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
		if (((ia64_idtrs[cpu] + i)->pte & 0x1) ||
		    ((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
			break;
	}
	per_cpu(ia64_tr_used, cpu) = i;
}
EXPORT_SYMBOL_GPL(ia64_ptr_entry);