Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2)  * This file is subject to the terms and conditions of the GNU General Public
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * License.  See the file "COPYING" in the main directory of this archive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  * for more details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * Carsten Langgaard, carstenl@mips.com
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/cpu_pm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/hugetlb.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <asm/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <asm/cpu-type.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <asm/bootinfo.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <asm/hazards.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #include <asm/mmu_context.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include <asm/tlb.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #include <asm/tlbmisc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) extern void build_tlb_refill_handler(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30)  * LOONGSON-2 has a 4 entry itlb which is a subset of jtlb, LOONGSON-3 has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31)  * a 4 entry itlb and a 4 entry dtlb which are subsets of jtlb. Unfortunately,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32)  * itlb/dtlb are not totally transparent to software.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) static inline void flush_micro_tlb(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 	switch (current_cpu_type()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 	case CPU_LOONGSON2EF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 		write_c0_diag(LOONGSON_DIAG_ITLB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 	case CPU_LOONGSON64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 		write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) static inline void flush_micro_tlb_vm(struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 	if (vma->vm_flags & VM_EXEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 		flush_micro_tlb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) void local_flush_tlb_all(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 	unsigned long old_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 	int entry, ftlbhighset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 	local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 	/* Save old context and create impossible VPN2 value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 	old_ctx = read_c0_entryhi();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 	htw_stop();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 	write_c0_entrylo0(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 	write_c0_entrylo1(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 	entry = num_wired_entries();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 	 * Blast 'em all away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 	 * If there are any wired entries, fall back to iterating
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 	if (cpu_has_tlbinv && !entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 		if (current_cpu_data.tlbsizevtlb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 			write_c0_index(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 			mtc0_tlbw_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 			tlbinvf();  /* invalidate VTLB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 		ftlbhighset = current_cpu_data.tlbsizevtlb +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 			current_cpu_data.tlbsizeftlbsets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 		for (entry = current_cpu_data.tlbsizevtlb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 		     entry < ftlbhighset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 		     entry++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 			write_c0_index(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 			mtc0_tlbw_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 			tlbinvf();  /* invalidate one FTLB set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 		while (entry < current_cpu_data.tlbsize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 			/* Make sure all entries differ. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 			write_c0_index(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 			mtc0_tlbw_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 			tlb_write_indexed();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 			entry++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 	tlbw_use_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 	write_c0_entryhi(old_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 	htw_start();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 	flush_micro_tlb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) EXPORT_SYMBOL(local_flush_tlb_all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 	unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 	struct mm_struct *mm = vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 	int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 	if (cpu_context(cpu, mm) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 		unsigned long size, flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 		local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 		start = round_down(start, PAGE_SIZE << 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 		end = round_up(end, PAGE_SIZE << 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 		size = (end - start) >> (PAGE_SHIFT + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 		if (size <= (current_cpu_data.tlbsizeftlbsets ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 			     current_cpu_data.tlbsize / 8 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 			     current_cpu_data.tlbsize / 2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 			unsigned long old_entryhi, old_mmid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 			int newpid = cpu_asid(cpu, mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 			old_entryhi = read_c0_entryhi();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 			if (cpu_has_mmid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 				old_mmid = read_c0_memorymapid();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 				write_c0_memorymapid(newpid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 			htw_stop();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 			while (start < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 				int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 				if (cpu_has_mmid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 					write_c0_entryhi(start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 					write_c0_entryhi(start | newpid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 				start += (PAGE_SIZE << 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 				mtc0_tlbw_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 				tlb_probe();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 				tlb_probe_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 				idx = read_c0_index();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 				write_c0_entrylo0(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 				write_c0_entrylo1(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 				if (idx < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 					continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 				/* Make sure all entries differ. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 				mtc0_tlbw_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 				tlb_write_indexed();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 			tlbw_use_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 			write_c0_entryhi(old_entryhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 			if (cpu_has_mmid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 				write_c0_memorymapid(old_mmid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 			htw_start();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 			drop_mmu_context(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 		flush_micro_tlb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 		local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 	unsigned long size, flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 	local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 	size = (size + 1) >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 	if (size <= (current_cpu_data.tlbsizeftlbsets ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 		     current_cpu_data.tlbsize / 8 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 		     current_cpu_data.tlbsize / 2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 		int pid = read_c0_entryhi();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 		start &= (PAGE_MASK << 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 		end += ((PAGE_SIZE << 1) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 		end &= (PAGE_MASK << 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 		htw_stop();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 		while (start < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 			int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 			write_c0_entryhi(start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 			start += (PAGE_SIZE << 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 			mtc0_tlbw_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 			tlb_probe();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 			tlb_probe_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 			idx = read_c0_index();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 			write_c0_entrylo0(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 			write_c0_entrylo1(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 			if (idx < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 			/* Make sure all entries differ. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 			mtc0_tlbw_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 			tlb_write_indexed();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 		tlbw_use_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 		write_c0_entryhi(pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 		htw_start();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 		local_flush_tlb_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 	flush_micro_tlb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 	int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 	if (cpu_context(cpu, vma->vm_mm) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 		unsigned long old_mmid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 		unsigned long flags, old_entryhi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 		int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 		page &= (PAGE_MASK << 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 		local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 		old_entryhi = read_c0_entryhi();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 		htw_stop();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 		if (cpu_has_mmid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 			old_mmid = read_c0_memorymapid();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 			write_c0_entryhi(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 			write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 			write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 		mtc0_tlbw_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 		tlb_probe();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 		tlb_probe_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 		idx = read_c0_index();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 		write_c0_entrylo0(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 		write_c0_entrylo1(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 		if (idx < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 			goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 		/* Make sure all entries differ. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 		mtc0_tlbw_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 		tlb_write_indexed();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 		tlbw_use_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 	finish:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 		write_c0_entryhi(old_entryhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 		if (cpu_has_mmid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 			write_c0_memorymapid(old_mmid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 		htw_start();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 		flush_micro_tlb_vm(vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 		local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256)  * This one is only used for pages with the global bit set so we don't care
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)  * much about the ASID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) void local_flush_tlb_one(unsigned long page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 	int oldpid, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 	local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 	oldpid = read_c0_entryhi();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 	htw_stop();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 	page &= (PAGE_MASK << 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 	write_c0_entryhi(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 	mtc0_tlbw_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 	tlb_probe();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 	tlb_probe_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 	idx = read_c0_index();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 	write_c0_entrylo0(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 	write_c0_entrylo1(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) 	if (idx >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) 		/* Make sure all entries differ. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 		mtc0_tlbw_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) 		tlb_write_indexed();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 		tlbw_use_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 	write_c0_entryhi(oldpid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 	htw_start();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 	flush_micro_tlb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)  * We will need multiple versions of update_mmu_cache(), one that just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290)  * updates the TLB with the new pte(s), and another which also checks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291)  * for the R4k "end of page" hardware bug and does the needy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 	pgd_t *pgdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 	p4d_t *p4dp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 	pud_t *pudp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 	pmd_t *pmdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 	pte_t *ptep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 	int idx, pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 	 * Handle debugger faulting in for debugee.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 	if (current->active_mm != vma->vm_mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) 	local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) 	htw_stop();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 	address &= (PAGE_MASK << 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 	if (cpu_has_mmid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 		write_c0_entryhi(address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 		pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) 		write_c0_entryhi(address | pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) 	pgdp = pgd_offset(vma->vm_mm, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 	mtc0_tlbw_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) 	tlb_probe();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 	tlb_probe_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) 	p4dp = p4d_offset(pgdp, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) 	pudp = pud_offset(p4dp, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) 	pmdp = pmd_offset(pudp, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) 	idx = read_c0_index();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 	/* this could be a huge page  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 	if (pmd_huge(*pmdp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 		unsigned long lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 		write_c0_pagemask(PM_HUGE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 		ptep = (pte_t *)pmdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 		lo = pte_to_entrylo(pte_val(*ptep));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 		write_c0_entrylo0(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) 		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 		mtc0_tlbw_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 		if (idx < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 			tlb_write_random();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 			tlb_write_indexed();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 		tlbw_use_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 		write_c0_pagemask(PM_DEFAULT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 		ptep = pte_offset_map(pmdp, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) #ifdef CONFIG_XPA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) 		write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 		if (cpu_has_xpa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) 			writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 		ptep++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) 		write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 		if (cpu_has_xpa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 			writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 		write_c0_entrylo0(ptep->pte_high);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) 		ptep++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) 		write_c0_entrylo1(ptep->pte_high);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) 		mtc0_tlbw_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) 		if (idx < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) 			tlb_write_random();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) 			tlb_write_indexed();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) 	tlbw_use_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 	htw_start();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) 	flush_micro_tlb_vm(vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) 		     unsigned long entryhi, unsigned long pagemask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) #ifdef CONFIG_XPA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 	panic("Broken for XPA kernels");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 	unsigned int old_mmid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) 	unsigned long wired;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) 	unsigned long old_pagemask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) 	unsigned long old_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) 	local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) 	if (cpu_has_mmid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) 		old_mmid = read_c0_memorymapid();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) 		write_c0_memorymapid(MMID_KERNEL_WIRED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 	/* Save old context and create impossible VPN2 value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 	old_ctx = read_c0_entryhi();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 	htw_stop();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) 	old_pagemask = read_c0_pagemask();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 	wired = num_wired_entries();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) 	write_c0_wired(wired + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 	write_c0_index(wired);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) 	tlbw_use_hazard();	/* What is the hazard here? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) 	write_c0_pagemask(pagemask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) 	write_c0_entryhi(entryhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) 	write_c0_entrylo0(entrylo0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 	write_c0_entrylo1(entrylo1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 	mtc0_tlbw_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) 	tlb_write_indexed();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 	tlbw_use_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) 	write_c0_entryhi(old_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) 	if (cpu_has_mmid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) 		write_c0_memorymapid(old_mmid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) 	tlbw_use_hazard();	/* What is the hazard here? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 	htw_start();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) 	write_c0_pagemask(old_pagemask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) 	local_flush_tlb_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) int has_transparent_hugepage(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) 	static unsigned int mask = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) 	if (mask == -1) {	/* first call comes during __init */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 		unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) 		local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) 		write_c0_pagemask(PM_HUGE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 		back_to_back_c0_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 		mask = read_c0_pagemask();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) 		write_c0_pagemask(PM_DEFAULT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) 		local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) 	return mask == PM_HUGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) EXPORT_SYMBOL(has_transparent_hugepage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) #endif /* CONFIG_TRANSPARENT_HUGEPAGE  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446)  * Used for loading TLB entries before trap_init() has started, when we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447)  * don't actually want to add a wired entry which remains throughout the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)  * lifetime of the system
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) int temp_tlb_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 			       unsigned long entryhi, unsigned long pagemask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 	unsigned long wired;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 	unsigned long old_pagemask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 	unsigned long old_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 	local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 	/* Save old context and create impossible VPN2 value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 	htw_stop();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 	old_ctx = read_c0_entryhi();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 	old_pagemask = read_c0_pagemask();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 	wired = num_wired_entries();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 	if (--temp_tlb_entry < wired) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 		printk(KERN_WARNING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 		       "No TLB space left for add_temporary_entry\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 		ret = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 	write_c0_index(temp_tlb_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 	write_c0_pagemask(pagemask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 	write_c0_entryhi(entryhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 	write_c0_entrylo0(entrylo0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 	write_c0_entrylo1(entrylo1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 	mtc0_tlbw_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 	tlb_write_indexed();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 	tlbw_use_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 	write_c0_entryhi(old_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 	write_c0_pagemask(old_pagemask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 	htw_start();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) static int ntlb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) static int __init set_ntlb(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 	get_option(&str, &ntlb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) __setup("ntlb=", set_ntlb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502)  * Configure TLB (for init or after a CPU has been powered off).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) static void r4k_tlb_configure(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 	 * You should never change this register:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 	 *     the value in the c0_pagemask register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 	 *   - The entire mm handling assumes the c0_pagemask register to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 	 *     be set to fixed-size pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 	write_c0_pagemask(PM_DEFAULT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 	back_to_back_c0_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 	if (read_c0_pagemask() != PM_DEFAULT_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 		panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 	write_c0_wired(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 	if (current_cpu_type() == CPU_R10000 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 	    current_cpu_type() == CPU_R12000 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 	    current_cpu_type() == CPU_R14000 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) 	    current_cpu_type() == CPU_R16000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 		write_c0_framemask(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 	if (cpu_has_rixi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 		 * Enable the no read, no exec bits, and enable large physical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 		 * address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 		set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 		set_c0_pagegrain(PG_RIE | PG_XIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 	temp_tlb_entry = current_cpu_data.tlbsize - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 	/* From this point on the ARC firmware is dead.	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 	local_flush_tlb_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 	/* Did I tell you that ARC SUCKS?  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) void tlb_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 	r4k_tlb_configure();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 	if (ntlb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 			int wired = current_cpu_data.tlbsize - ntlb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 			write_c0_wired(wired);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 			write_c0_index(wired-1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 			printk("Restricting TLB to %d entries\n", ntlb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 	build_tlb_refill_handler();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 			       void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 	switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 	case CPU_PM_ENTER_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 	case CPU_PM_EXIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 		r4k_tlb_configure();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 	return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) static struct notifier_block r4k_tlb_pm_notifier_block = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 	.notifier_call = r4k_tlb_pm_notifier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) static int __init r4k_tlb_init_pm(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 	return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) arch_initcall(r4k_tlb_init_pm);