Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *         Scott Wood, scottwood@freescale.com
 *         Ashish Kalra, ashish.kalra@freescale.com
 *         Varun Sethi, varun.sethi@freescale.com
 *         Alexander Graf, agraf@suse.de
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>

#include "e500.h"
#include "trace_booke.h"
#include "timing.h"
#include "e500_mmu_host.h"

static inline unsigned int gtlb0_get_next_victim(
		struct kvmppc_vcpu_e500 *vcpu_e500)
{
	unsigned int victim;

	victim = vcpu_e500->gtlb_nv[0]++;
	if (unlikely(vcpu_e500->gtlb_nv[0] >= vcpu_e500->gtlb_params[0].ways))
		vcpu_e500->gtlb_nv[0] = 0;

	return victim;
}

static int tlb0_set_base(gva_t addr, int sets, int ways)
{
	int set_base;

	set_base = (addr >> PAGE_SHIFT) & (sets - 1);
	set_base *= ways;

	return set_base;
}

static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr)
{
	return tlb0_set_base(addr, vcpu_e500->gtlb_params[0].sets,
			     vcpu_e500->gtlb_params[0].ways);
}

static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel = get_tlb_esel_bit(vcpu);

	if (tlbsel == 0) {
		esel &= vcpu_e500->gtlb_params[0].ways - 1;
		esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2);
	} else {
		esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1;
	}

	return esel;
}

/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
		gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
	int size = vcpu_e500->gtlb_params[tlbsel].entries;
	unsigned int set_base, offset;
	int i;

	if (tlbsel == 0) {
		set_base = gtlb0_set_base(vcpu_e500, eaddr);
		size = vcpu_e500->gtlb_params[0].ways;
	} else {
		if (eaddr < vcpu_e500->tlb1_min_eaddr ||
				eaddr > vcpu_e500->tlb1_max_eaddr)
			return -1;
		set_base = 0;
	}

	offset = vcpu_e500->gtlb_offset[tlbsel];

	for (i = 0; i < size; i++) {
		struct kvm_book3e_206_tlb_entry *tlbe =
			&vcpu_e500->gtlb_arch[offset + set_base + i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;

		if (eaddr > get_tlb_end(tlbe))
			continue;

		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (get_tlb_ts(tlbe) != as && as != -1)
			continue;

		return set_base + i;
	}

	return -1;
}

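/*
 * Load the guest's MAS registers the way the hardware would on a TLB miss,
 * so the guest's miss handler can fill in a new entry with tlbwe.  The
 * next-victim hint is only meaningful for the set-associative TLB0.
 */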
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
		gva_t eaddr, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int victim, tsized;
	int tlbsel;

	/* since we only have two TLBs, only lower bit is used. */
	tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;
	victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
	tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f;

	vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
		| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
	vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
		| MAS1_TID(get_tlbmiss_tid(vcpu))
		| MAS1_TSIZE(tsized);
	vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)
		| (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK);
	vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
	vcpu->arch.shared->mas6 = (vcpu->arch.shared->mas6 & MAS6_SPID1)
		| (get_cur_pid(vcpu) << 16)
		| (as ? MAS6_SAS : 0);
}

static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int size = vcpu_e500->gtlb_params[1].entries;
	unsigned int offset;
	gva_t eaddr;
	int i;

	vcpu_e500->tlb1_min_eaddr = ~0UL;
	vcpu_e500->tlb1_max_eaddr = 0;
	offset = vcpu_e500->gtlb_offset[1];

	for (i = 0; i < size; i++) {
		struct kvm_book3e_206_tlb_entry *tlbe =
			&vcpu_e500->gtlb_arch[offset + i];

		if (!get_tlb_v(tlbe))
			continue;

		eaddr = get_tlb_eaddr(tlbe);
		vcpu_e500->tlb1_min_eaddr =
				min(vcpu_e500->tlb1_min_eaddr, eaddr);

		eaddr = get_tlb_end(tlbe);
		vcpu_e500->tlb1_max_eaddr =
				max(vcpu_e500->tlb1_max_eaddr, eaddr);
	}
}

static int kvmppc_need_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500,
				struct kvm_book3e_206_tlb_entry *gtlbe)
{
	unsigned long start, end, size;

	size = get_tlb_bytes(gtlbe);
	start = get_tlb_eaddr(gtlbe) & ~(size - 1);
	end = start + size - 1;

	return vcpu_e500->tlb1_min_eaddr == start ||
			vcpu_e500->tlb1_max_eaddr == end;
}

/* This function is supposed to be called for adding a new valid tlb entry. */
static void kvmppc_set_tlb1map_range(struct kvm_vcpu *vcpu,
				struct kvm_book3e_206_tlb_entry *gtlbe)
{
	unsigned long start, end, size;
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	if (!get_tlb_v(gtlbe))
		return;

	size = get_tlb_bytes(gtlbe);
	start = get_tlb_eaddr(gtlbe) & ~(size - 1);
	end = start + size - 1;

	vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, start);
	vcpu_e500->tlb1_max_eaddr = max(vcpu_e500->tlb1_max_eaddr, end);
}

static inline int kvmppc_e500_gtlbe_invalidate(
				struct kvmppc_vcpu_e500 *vcpu_e500,
				int tlbsel, int esel)
{
	struct kvm_book3e_206_tlb_entry *gtlbe =
		get_entry(vcpu_e500, tlbsel, esel);

	if (unlikely(get_tlb_iprot(gtlbe)))
		return -1;

	if (tlbsel == 1 && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
		kvmppc_recalc_tlb1map_range(vcpu_e500);

	gtlbe->mas1 = 0;

	return 0;
}

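/*
 * Emulate a guest write to MMUCSR0: the TLB0FI/TLB1FI bits request a flash
 * invalidation of the corresponding guest TLB, after which all host shadow
 * mappings are dropped as well.
 */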
int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
	int esel;

	if (value & MMUCSR0_TLB0FI)
		for (esel = 0; esel < vcpu_e500->gtlb_params[0].entries; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
	if (value & MMUCSR0_TLB1FI)
		for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

	/* Invalidate all host shadow mappings */
	kvmppc_core_flush_tlb(&vcpu_e500->vcpu);

	return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int ia;
	int esel, tlbsel;

	ia = (ea >> 2) & 0x1;

	/* since we only have two TLBs, only lower bit is used. */
	tlbsel = (ea >> 3) & 0x1;

	if (ia) {
		/* invalidate all entries */
		for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries;
		     esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	} else {
		ea &= 0xfffff000;
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
				get_cur_pid(vcpu), -1);
		if (esel >= 0)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	}

	/* Invalidate all host shadow mappings */
	kvmppc_core_flush_tlb(&vcpu_e500->vcpu);

	return EMULATE_DONE;
}

static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
		       int pid, int type)
{
	struct kvm_book3e_206_tlb_entry *tlbe;
	int tid, esel;

	/* invalidate all entries */
	for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) {
		tlbe = get_entry(vcpu_e500, tlbsel, esel);
		tid = get_tlb_tid(tlbe);
		if (type == 0 || tid == pid) {
			inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
		}
	}
}

static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid,
		       gva_t ea)
{
	int tlbsel, esel;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1);
		if (esel >= 0) {
			inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
			break;
		}
	}
}

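/*
 * Emulate the tlbilx instruction: types 0 and 1 invalidate all (or all
 * matching-PID) entries in both guest TLBs via tlbilx_all(), while type 3
 * invalidates the single entry matching the effective address.
 */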
int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int pid = get_cur_spid(vcpu);

	if (type == 0 || type == 1) {
		tlbilx_all(vcpu_e500, 0, pid, type);
		tlbilx_all(vcpu_e500, 1, pid, type);
	} else if (type == 3) {
		tlbilx_one(vcpu_e500, pid, ea);
	}

	return EMULATE_DONE;
}

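/*
 * Emulate tlbre: read the guest TLB entry selected by MAS0 (and, for TLB0,
 * the EPN in MAS2) back into the guest's MAS1/MAS2/MAS7_3 registers.
 */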
int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel, esel;
	struct kvm_book3e_206_tlb_entry *gtlbe;

	tlbsel = get_tlb_tlbsel(vcpu);
	esel = get_tlb_esel(vcpu, tlbsel);

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);
	vcpu->arch.shared->mas0 &= ~MAS0_NV(~0);
	vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
	vcpu->arch.shared->mas1 = gtlbe->mas1;
	vcpu->arch.shared->mas2 = gtlbe->mas2;
	vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;

	return EMULATE_DONE;
}

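/*
 * Emulate tlbsx: search both guest TLBs for the effective address using the
 * SPID/SAS values from MAS6.  On a hit the MAS registers are loaded from the
 * matching entry; on a miss they are set up for a subsequent tlbwe, much as
 * a real TLB miss would do.
 */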
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int as = !!get_cur_sas(vcpu);
	unsigned int pid = get_cur_spid(vcpu);
	int esel, tlbsel;
	struct kvm_book3e_206_tlb_entry *gtlbe = NULL;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
		if (esel >= 0) {
			gtlbe = get_entry(vcpu_e500, tlbsel, esel);
			break;
		}
	}

	if (gtlbe) {
		esel &= vcpu_e500->gtlb_params[tlbsel].ways - 1;

		vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
		vcpu->arch.shared->mas1 = gtlbe->mas1;
		vcpu->arch.shared->mas2 = gtlbe->mas2;
		vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
	} else {
		int victim;

		/* since we only have two TLBs, only lower bit is used. */
		tlbsel = vcpu->arch.shared->mas4 >> 28 & 0x1;
		victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;

		vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel)
			| MAS0_ESEL(victim)
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
		vcpu->arch.shared->mas1 =
			  (vcpu->arch.shared->mas6 & MAS6_SPID0)
			| ((vcpu->arch.shared->mas6 & MAS6_SAS) ? MAS1_TS : 0)
			| (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0));
		vcpu->arch.shared->mas2 &= MAS2_EPN;
		vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 &
					   MAS2_ATTRIB_MASK;
		vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 |
					     MAS3_U2 | MAS3_U3;
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
	return EMULATE_DONE;
}

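/*
 * Emulate tlbwe: copy the guest's MAS registers into the selected guest TLB
 * entry, invalidating any host shadow mapping of the entry being overwritten
 * and keeping the cached TLB1 address range up to date.  If the new mapping
 * is host-safe it is premapped straight away.
 */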
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry *gtlbe;
	int tlbsel, esel;
	int recal = 0;
	int idx;

	tlbsel = get_tlb_tlbsel(vcpu);
	esel = get_tlb_esel(vcpu, tlbsel);

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);

	if (get_tlb_v(gtlbe)) {
		inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
		if ((tlbsel == 1) &&
			kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
			recal = 1;
	}

	gtlbe->mas1 = vcpu->arch.shared->mas1;
	gtlbe->mas2 = vcpu->arch.shared->mas2;
	if (!(vcpu->arch.shared->msr & MSR_CM))
		gtlbe->mas2 &= 0xffffffffUL;
	gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;

	trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
	                              gtlbe->mas2, gtlbe->mas7_3);

	if (tlbsel == 1) {
		/*
		 * If a valid tlb1 entry is overwritten then recalculate the
		 * min/max TLB1 map address range otherwise no need to look
		 * in tlb1 array.
		 */
		if (recal)
			kvmppc_recalc_tlb1map_range(vcpu_e500);
		else
			kvmppc_set_tlb1map_range(vcpu, gtlbe);
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
	if (tlbe_is_host_safe(vcpu, gtlbe)) {
		u64 eaddr = get_tlb_eaddr(gtlbe);
		u64 raddr = get_tlb_raddr(gtlbe);

		if (tlbsel == 0) {
			gtlbe->mas1 &= ~MAS1_TSIZE(~0);
			gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);
		}

		/* Premap the faulting page */
		kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel));
	}

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
	return EMULATE_DONE;
}

static int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
				  gva_t eaddr, unsigned int pid, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel, tlbsel;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
		if (esel >= 0)
			return index_of(tlbsel, esel);
	}

	return -1;
}

/* 'linear_address' is actually an encoding of AS|PID|EADDR . */
int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
                               struct kvm_translation *tr)
{
	int index;
	gva_t eaddr;
	u8 pid;
	u8 as;

	eaddr = tr->linear_address;
	pid = (tr->linear_address >> 32) & 0xff;
	as = (tr->linear_address >> 40) & 0x1;

	index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as);
	if (index < 0) {
		tr->valid = 0;
		return 0;
	}

	tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr);
	/* XXX what does "writeable" and "usermode" even mean? */
	tr->valid = 1;

	return 0;
}


int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.regs.nip, as);
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}

gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
			gva_t eaddr)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry *gtlbe;
	u64 pgmask;

	gtlbe = get_entry(vcpu_e500, tlbsel_of(index), esel_of(index));
	pgmask = get_tlb_bytes(gtlbe) - 1;

	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}

/*****************************************/

static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int i;

	kvmppc_core_flush_tlb(&vcpu_e500->vcpu);
	kfree(vcpu_e500->g2h_tlb1_map);
	kfree(vcpu_e500->gtlb_priv[0]);
	kfree(vcpu_e500->gtlb_priv[1]);

	if (vcpu_e500->shared_tlb_pages) {
		vfree((void *)(round_down((uintptr_t)vcpu_e500->gtlb_arch,
					  PAGE_SIZE)));

		for (i = 0; i < vcpu_e500->num_shared_tlb_pages; i++) {
			set_page_dirty_lock(vcpu_e500->shared_tlb_pages[i]);
			put_page(vcpu_e500->shared_tlb_pages[i]);
		}

		vcpu_e500->num_shared_tlb_pages = 0;

		kfree(vcpu_e500->shared_tlb_pages);
		vcpu_e500->shared_tlb_pages = NULL;
	} else {
		kfree(vcpu_e500->gtlb_arch);
	}

	vcpu_e500->gtlb_arch = NULL;
}

void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.mas0 = vcpu->arch.shared->mas0;
	sregs->u.e.mas1 = vcpu->arch.shared->mas1;
	sregs->u.e.mas2 = vcpu->arch.shared->mas2;
	sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
	sregs->u.e.mas4 = vcpu->arch.shared->mas4;
	sregs->u.e.mas6 = vcpu->arch.shared->mas6;

	sregs->u.e.mmucfg = vcpu->arch.mmucfg;
	sregs->u.e.tlbcfg[0] = vcpu->arch.tlbcfg[0];
	sregs->u.e.tlbcfg[1] = vcpu->arch.tlbcfg[1];
	sregs->u.e.tlbcfg[2] = 0;
	sregs->u.e.tlbcfg[3] = 0;
}

int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) {
		vcpu->arch.shared->mas0 = sregs->u.e.mas0;
		vcpu->arch.shared->mas1 = sregs->u.e.mas1;
		vcpu->arch.shared->mas2 = sregs->u.e.mas2;
		vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
		vcpu->arch.shared->mas4 = sregs->u.e.mas4;
		vcpu->arch.shared->mas6 = sregs->u.e.mas6;
	}

	return 0;
}

int kvmppc_get_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
				union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	switch (id) {
	case KVM_REG_PPC_MAS0:
		*val = get_reg_val(id, vcpu->arch.shared->mas0);
		break;
	case KVM_REG_PPC_MAS1:
		*val = get_reg_val(id, vcpu->arch.shared->mas1);
		break;
	case KVM_REG_PPC_MAS2:
		*val = get_reg_val(id, vcpu->arch.shared->mas2);
		break;
	case KVM_REG_PPC_MAS7_3:
		*val = get_reg_val(id, vcpu->arch.shared->mas7_3);
		break;
	case KVM_REG_PPC_MAS4:
		*val = get_reg_val(id, vcpu->arch.shared->mas4);
		break;
	case KVM_REG_PPC_MAS6:
		*val = get_reg_val(id, vcpu->arch.shared->mas6);
		break;
	case KVM_REG_PPC_MMUCFG:
		*val = get_reg_val(id, vcpu->arch.mmucfg);
		break;
	case KVM_REG_PPC_EPTCFG:
		*val = get_reg_val(id, vcpu->arch.eptcfg);
		break;
	case KVM_REG_PPC_TLB0CFG:
	case KVM_REG_PPC_TLB1CFG:
	case KVM_REG_PPC_TLB2CFG:
	case KVM_REG_PPC_TLB3CFG:
		i = id - KVM_REG_PPC_TLB0CFG;
		*val = get_reg_val(id, vcpu->arch.tlbcfg[i]);
		break;
	case KVM_REG_PPC_TLB0PS:
	case KVM_REG_PPC_TLB1PS:
	case KVM_REG_PPC_TLB2PS:
	case KVM_REG_PPC_TLB3PS:
		i = id - KVM_REG_PPC_TLB0PS;
		*val = get_reg_val(id, vcpu->arch.tlbps[i]);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

int kvmppc_set_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
			       union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	switch (id) {
	case KVM_REG_PPC_MAS0:
		vcpu->arch.shared->mas0 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MAS1:
		vcpu->arch.shared->mas1 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MAS2:
		vcpu->arch.shared->mas2 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MAS7_3:
		vcpu->arch.shared->mas7_3 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MAS4:
		vcpu->arch.shared->mas4 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MAS6:
		vcpu->arch.shared->mas6 = set_reg_val(id, *val);
		break;
	/* Only allow MMU registers to be set to the config supported by KVM */
	case KVM_REG_PPC_MMUCFG: {
		u32 reg = set_reg_val(id, *val);
		if (reg != vcpu->arch.mmucfg)
			r = -EINVAL;
		break;
	}
	case KVM_REG_PPC_EPTCFG: {
		u32 reg = set_reg_val(id, *val);
		if (reg != vcpu->arch.eptcfg)
			r = -EINVAL;
		break;
	}
	case KVM_REG_PPC_TLB0CFG:
	case KVM_REG_PPC_TLB1CFG:
	case KVM_REG_PPC_TLB2CFG:
	case KVM_REG_PPC_TLB3CFG: {
		/* MMU geometry (N_ENTRY/ASSOC) can be set only using SW_TLB */
		u32 reg = set_reg_val(id, *val);
		i = id - KVM_REG_PPC_TLB0CFG;
		if (reg != vcpu->arch.tlbcfg[i])
			r = -EINVAL;
		break;
	}
	case KVM_REG_PPC_TLB0PS:
	case KVM_REG_PPC_TLB1PS:
	case KVM_REG_PPC_TLB2PS:
	case KVM_REG_PPC_TLB3PS: {
		u32 reg = set_reg_val(id, *val);
		i = id - KVM_REG_PPC_TLB0PS;
		if (reg != vcpu->arch.tlbps[i])
			r = -EINVAL;
		break;
	}
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static int vcpu_mmu_geometry_update(struct kvm_vcpu *vcpu,
		struct kvm_book3e_206_tlb_params *params)
{
	vcpu->arch.tlbcfg[0] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	if (params->tlb_sizes[0] <= 2048)
		vcpu->arch.tlbcfg[0] |= params->tlb_sizes[0];
	vcpu->arch.tlbcfg[0] |= params->tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;

	vcpu->arch.tlbcfg[1] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	vcpu->arch.tlbcfg[1] |= params->tlb_sizes[1];
	vcpu->arch.tlbcfg[1] |= params->tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;
	return 0;
}

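/*
 * Handle the KVM_CAP_SW_TLB ioctl: validate the TLB geometry requested by
 * userspace, pin and vmap its TLB entry array so the guest TLB is shared
 * directly with userspace, then switch the vcpu to the new geometry and
 * free the old guest TLB state.
 */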
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) 			      struct kvm_config_tlb *cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) 	struct kvm_book3e_206_tlb_params params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) 	char *virt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) 	struct page **pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) 	struct tlbe_priv *privs[2] = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) 	u64 *g2h_bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) 	size_t array_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) 	u32 sets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) 	int num_pages, ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) 	if (cfg->mmu_type != KVM_MMU_FSL_BOOKE_NOHV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) 	if (copy_from_user(&params, (void __user *)(uintptr_t)cfg->params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) 			   sizeof(params)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) 	if (params.tlb_sizes[1] > 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) 	if (params.tlb_ways[1] != params.tlb_sizes[1])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) 	if (params.tlb_sizes[2] != 0 || params.tlb_sizes[3] != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) 	if (params.tlb_ways[2] != 0 || params.tlb_ways[3] != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) 	if (!is_power_of_2(params.tlb_ways[0]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) 	sets = params.tlb_sizes[0] >> ilog2(params.tlb_ways[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) 	if (!is_power_of_2(sets))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) 	array_len = params.tlb_sizes[0] + params.tlb_sizes[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) 	array_len *= sizeof(struct kvm_book3e_206_tlb_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) 	if (cfg->array_len < array_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) 	num_pages = DIV_ROUND_UP(cfg->array + array_len - 1, PAGE_SIZE) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) 		    cfg->array / PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) 	pages = kmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) 	if (!pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) 	ret = get_user_pages_fast(cfg->array, num_pages, FOLL_WRITE, pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 		goto free_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) 	if (ret != num_pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) 		num_pages = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) 		ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 		goto put_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) 	virt = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) 	if (!virt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) 		goto put_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) 	privs[0] = kcalloc(params.tlb_sizes[0], sizeof(*privs[0]), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) 	if (!privs[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 		goto put_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 	privs[1] = kcalloc(params.tlb_sizes[1], sizeof(*privs[1]), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) 	if (!privs[1]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) 		goto free_privs_first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) 
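	/*
	 * One bitmap word per guest TLB1 entry, tracking which host TLB1
	 * entries currently shadow it.
	 */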
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) 	g2h_bitmap = kcalloc(params.tlb_sizes[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) 			     sizeof(*g2h_bitmap),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) 			     GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) 	if (!g2h_bitmap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) 		goto free_privs_second;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) 
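	/*
	 * Everything needed for the new configuration has been allocated;
	 * drop the old guest TLB state and switch the vcpu over to the
	 * user-supplied array.
	 */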
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) 	free_gtlb(vcpu_e500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) 	vcpu_e500->gtlb_priv[0] = privs[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) 	vcpu_e500->gtlb_priv[1] = privs[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) 	vcpu_e500->g2h_tlb1_map = g2h_bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) 	vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) 		(virt + (cfg->array & (PAGE_SIZE - 1)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) 	vcpu_e500->gtlb_params[0].entries = params.tlb_sizes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) 	vcpu_e500->gtlb_params[1].entries = params.tlb_sizes[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) 	vcpu_e500->gtlb_offset[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) 	vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) 	/* Update vcpu's MMU geometry based on SW_TLB input */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) 	vcpu_mmu_geometry_update(vcpu, &params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) 	vcpu_e500->shared_tlb_pages = pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) 	vcpu_e500->num_shared_tlb_pages = num_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) 	vcpu_e500->gtlb_params[0].ways = params.tlb_ways[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) 	vcpu_e500->gtlb_params[0].sets = sets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) 
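	/* TLB1 is treated as fully associative: a single set, sizes[1] ways. */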
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) 	vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) 	vcpu_e500->gtlb_params[1].sets = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) 	kvmppc_recalc_tlb1map_range(vcpu_e500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)  free_privs_second:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) 	kfree(privs[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)  free_privs_first:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) 	kfree(privs[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)  put_pages:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) 	for (i = 0; i < num_pages; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) 		put_page(pages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)  free_pages:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) 	kfree(pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
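/*
 * Illustrative sketch (not part of the original file): roughly how a
 * userspace VMM might reach kvm_vcpu_ioctl_config_tlb() above, by enabling
 * KVM_CAP_SW_TLB on the vcpu with a pointer to a struct kvm_config_tlb.
 * The geometry values below are arbitrary examples chosen to satisfy the
 * checks above; error handling is omitted.
 *
 *	struct kvm_book3e_206_tlb_params params = {
 *		.tlb_sizes = { 512, 64, 0, 0 },
 *		.tlb_ways  = {   4, 64, 0, 0 },
 *	};
 *	struct kvm_book3e_206_tlb_entry *array =
 *		calloc(512 + 64, sizeof(*array));
 *	struct kvm_config_tlb cfg = {
 *		.params    = (uintptr_t)&params,
 *		.array     = (uintptr_t)array,
 *		.mmu_type  = KVM_MMU_FSL_BOOKE_NOHV,
 *		.array_len = (512 + 64) * sizeof(*array),
 *	};
 *	struct kvm_enable_cap cap = {
 *		.cap  = KVM_CAP_SW_TLB,
 *		.args = { (uintptr_t)&cfg },
 *	};
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */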
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) 
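/*
 * KVM_DIRTY_TLB: userspace reports that it has modified entries in the
 * shared TLB array.  The current implementation ignores the dirty bitmap
 * and conservatively re-derives the TLB1 range and flushes every shadow
 * mapping.  A hedged usage sketch (hypothetical values, not from this
 * file): userspace fills a struct kvm_dirty_tlb with a bitmap pointer and
 * num_dirty count, then issues ioctl(vcpu_fd, KVM_DIRTY_TLB, &dirty).
 */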
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) 			     struct kvm_dirty_tlb *dirty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) 	kvmppc_recalc_tlb1map_range(vcpu_e500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) 	kvmppc_core_flush_tlb(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) /* Vcpu's MMU default configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) static int vcpu_mmu_init(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) 		       struct kvmppc_e500_tlb_params *params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) 	/* Initialize RASIZE, PIDSIZE, NTLBS and MAVN fields with host values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) 	vcpu->arch.mmucfg = mfspr(SPRN_MMUCFG) & ~MMUCFG_LPIDSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) 	/* Initialize TLBnCFG fields with host values and SW_TLB geometry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) 	vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) 			     ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) 	vcpu->arch.tlbcfg[0] |= params[0].entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) 	vcpu->arch.tlbcfg[0] |= params[0].ways << TLBnCFG_ASSOC_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) 	vcpu->arch.tlbcfg[1] = mfspr(SPRN_TLB1CFG) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) 			     ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) 	vcpu->arch.tlbcfg[1] |= params[1].entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) 	vcpu->arch.tlbcfg[1] |= params[1].ways << TLBnCFG_ASSOC_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) 
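	/*
	 * MMU v2 guests also see the host's TLB page-size registers, but
	 * LRAT and hardware page-table walking are masked off since they
	 * are not emulated.
	 */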
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) 	if (has_feature(vcpu, VCPU_FTR_MMU_V2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) 		vcpu->arch.tlbps[0] = mfspr(SPRN_TLB0PS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) 		vcpu->arch.tlbps[1] = mfspr(SPRN_TLB1PS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) 		vcpu->arch.mmucfg &= ~MMUCFG_LRAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) 		/* Guest mmu emulation currently doesn't handle E.PT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) 		vcpu->arch.eptcfg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) 		vcpu->arch.tlbcfg[0] &= ~TLBnCFG_PT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) 		vcpu->arch.tlbcfg[1] &= ~TLBnCFG_IND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) 	struct kvm_vcpu *vcpu = &vcpu_e500->vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) 	if (e500_mmu_host_init(vcpu_e500))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) 		goto free_vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) 
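	/*
	 * Default guest TLB geometry, used until userspace reconfigures it
	 * through KVM_CAP_SW_TLB / kvm_vcpu_ioctl_config_tlb().
	 */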
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) 	vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) 	vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) 	vcpu_e500->gtlb_params[0].ways = KVM_E500_TLB0_WAY_NUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) 	vcpu_e500->gtlb_params[0].sets =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) 		KVM_E500_TLB0_SIZE / KVM_E500_TLB0_WAY_NUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) 	vcpu_e500->gtlb_params[1].ways = KVM_E500_TLB1_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) 	vcpu_e500->gtlb_params[1].sets = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) 	vcpu_e500->gtlb_arch = kmalloc_array(KVM_E500_TLB0_SIZE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) 					     KVM_E500_TLB1_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) 					     sizeof(*vcpu_e500->gtlb_arch),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) 					     GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) 	if (!vcpu_e500->gtlb_arch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) 	vcpu_e500->gtlb_offset[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) 	vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) 	vcpu_e500->gtlb_priv[0] = kcalloc(vcpu_e500->gtlb_params[0].entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) 					  sizeof(struct tlbe_ref),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) 					  GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) 	if (!vcpu_e500->gtlb_priv[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) 		goto free_vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) 	vcpu_e500->gtlb_priv[1] = kcalloc(vcpu_e500->gtlb_params[1].entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) 					  sizeof(struct tlbe_ref),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) 					  GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) 	if (!vcpu_e500->gtlb_priv[1])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) 		goto free_vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) 	vcpu_e500->g2h_tlb1_map = kcalloc(vcpu_e500->gtlb_params[1].entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) 					  sizeof(*vcpu_e500->g2h_tlb1_map),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) 					  GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) 	if (!vcpu_e500->g2h_tlb1_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) 		goto free_vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) 	vcpu_mmu_init(vcpu, vcpu_e500->gtlb_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) 	kvmppc_recalc_tlb1map_range(vcpu_e500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)  free_vcpu:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) 	free_gtlb(vcpu_e500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) 	return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) 	free_gtlb(vcpu_e500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) 	e500_mmu_host_uninit(vcpu_e500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }