Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010,2012 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Varun Sethi, <varun.sethi@freescale.com>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/e500.c,
 * by Yu Liu <yu.liu@freescale.com>.
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>

#include "booke.h"
#include "e500.h"

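/*
 * Kick a vcpu by sending it a guest doorbell of the class matching the
 * requested interrupt type.  The message is tagged with the vcpu's LPID
 * and vcpu_id so that only the thread running that vcpu accepts it.
 */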
void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type)
{
        enum ppc_dbell dbell_type;
        unsigned long tag;

        switch (type) {
        case INT_CLASS_NONCRIT:
                dbell_type = PPC_G_DBELL;
                break;
        case INT_CLASS_CRIT:
                dbell_type = PPC_G_DBELL_CRIT;
                break;
        case INT_CLASS_MC:
                dbell_type = PPC_G_DBELL_MC;
                break;
        default:
                WARN_ONCE(1, "%s: unknown int type %d\n", __func__, type);
                return;
        }

        preempt_disable();
        tag = PPC_DBELL_LPID(get_lpid(vcpu)) | vcpu->vcpu_id;
        mb();
        ppc_msgsnd(dbell_type, 0, tag);
        preempt_enable();
}

/* gtlbe must not be mapped by more than one host tlb entry */
void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
                           struct kvm_book3e_206_tlb_entry *gtlbe)
{
        unsigned int tid, ts;
        gva_t eaddr;
        u32 val;
        unsigned long flags;

        ts = get_tlb_ts(gtlbe);
        tid = get_tlb_tid(gtlbe);

        /* We search the host TLB to invalidate its shadow TLB entry */
        val = (tid << 16) | ts;
        eaddr = get_tlb_eaddr(gtlbe);

        local_irq_save(flags);

        mtspr(SPRN_MAS6, val);
        mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu));

        asm volatile("tlbsx 0, %[eaddr]\n" : : [eaddr] "r" (eaddr));
        val = mfspr(SPRN_MAS1);
        if (val & MAS1_VALID) {
                mtspr(SPRN_MAS1, val & ~MAS1_VALID);
                asm volatile("tlbwe");
        }
        mtspr(SPRN_MAS5, 0);
        /* NOTE: tlbsx also updates mas8, so clear it for host tlbwe */
        mtspr(SPRN_MAS8, 0);
        isync();

        local_irq_restore(flags);
}

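/* Invalidate all host TLB entries that belong to this vcpu's LPID. */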
void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        unsigned long flags;

        local_irq_save(flags);
        mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu));
        asm volatile("tlbilxlpid");
        mtspr(SPRN_MAS5, 0);
        local_irq_restore(flags);
}

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
        vcpu->arch.pid = pid;
}

void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
}

/* We use two lpids per VM */
static DEFINE_PER_CPU(struct kvm_vcpu *[KVMPPC_NR_LPIDS], last_vcpu_of_lpid);

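/*
 * Load guest state into the hardware guest-state registers (LPID, EPCR,
 * GSPRGs, GSRRs, and so on).  If this vcpu last ran on a different
 * physical cpu, or another vcpu used this LPID on this cpu in the
 * meantime, the shadow TLB entries for the LPID are stale and flushed.
 */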
static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

        kvmppc_booke_vcpu_load(vcpu, cpu);

        mtspr(SPRN_LPID, get_lpid(vcpu));
        mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr);
        mtspr(SPRN_GPIR, vcpu->vcpu_id);
        mtspr(SPRN_MSRP, vcpu->arch.shadow_msrp);
        vcpu->arch.eplc = EPC_EGS | (get_lpid(vcpu) << EPC_ELPID_SHIFT);
        vcpu->arch.epsc = vcpu->arch.eplc;
        mtspr(SPRN_EPLC, vcpu->arch.eplc);
        mtspr(SPRN_EPSC, vcpu->arch.epsc);

        mtspr(SPRN_GIVPR, vcpu->arch.ivpr);
        mtspr(SPRN_GIVOR2, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
        mtspr(SPRN_GIVOR8, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]);
        mtspr(SPRN_GSPRG0, (unsigned long)vcpu->arch.shared->sprg0);
        mtspr(SPRN_GSPRG1, (unsigned long)vcpu->arch.shared->sprg1);
        mtspr(SPRN_GSPRG2, (unsigned long)vcpu->arch.shared->sprg2);
        mtspr(SPRN_GSPRG3, (unsigned long)vcpu->arch.shared->sprg3);

        mtspr(SPRN_GSRR0, vcpu->arch.shared->srr0);
        mtspr(SPRN_GSRR1, vcpu->arch.shared->srr1);

        mtspr(SPRN_GEPR, vcpu->arch.epr);
        mtspr(SPRN_GDEAR, vcpu->arch.shared->dar);
        mtspr(SPRN_GESR, vcpu->arch.shared->esr);

        if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
            __this_cpu_read(last_vcpu_of_lpid[get_lpid(vcpu)]) != vcpu) {
                kvmppc_e500_tlbil_all(vcpu_e500);
                __this_cpu_write(last_vcpu_of_lpid[get_lpid(vcpu)], vcpu);
        }
}

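/* Save the guest SPR state back from the hardware registers. */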
static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu)
{
        vcpu->arch.eplc = mfspr(SPRN_EPLC);
        vcpu->arch.epsc = mfspr(SPRN_EPSC);

        vcpu->arch.shared->sprg0 = mfspr(SPRN_GSPRG0);
        vcpu->arch.shared->sprg1 = mfspr(SPRN_GSPRG1);
        vcpu->arch.shared->sprg2 = mfspr(SPRN_GSPRG2);
        vcpu->arch.shared->sprg3 = mfspr(SPRN_GSPRG3);

        vcpu->arch.shared->srr0 = mfspr(SPRN_GSRR0);
        vcpu->arch.shared->srr1 = mfspr(SPRN_GSRR1);

        vcpu->arch.epr = mfspr(SPRN_GEPR);
        vcpu->arch.shared->dar = mfspr(SPRN_GDEAR);
        vcpu->arch.shared->esr = mfspr(SPRN_GESR);

        vcpu->arch.oldpir = mfspr(SPRN_PIR);

        kvmppc_booke_vcpu_put(vcpu);
}

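/*
 * Only e500mc, e5500 and (when the host is built with AltiVec support)
 * e6500 cores are accepted.
 */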
int kvmppc_core_check_processor_compat(void)
{
        int r;

        if (strcmp(cur_cpu_spec->cpu_name, "e500mc") == 0)
                r = 0;
        else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0)
                r = 0;
#ifdef CONFIG_ALTIVEC
        /*
         * Since guests have the privilege to enable AltiVec, we need AltiVec
         * support in the host to save/restore their context.
         * Don't use CPU_FTR_ALTIVEC to identify cores with AltiVec unit
         * because it's cleared in the absence of CONFIG_ALTIVEC!
         */
        else if (strcmp(cur_cpu_spec->cpu_name, "e6500") == 0)
                r = 0;
#endif
        else
                r = -ENOTSUPP;

        return r;
}

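/*
 * One-time vcpu setup: choose the EPCR/MSRP values that will be loaded
 * into hardware while the guest runs, and record the host PVR/SVR
 * values for the vcpu.
 */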
int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

        vcpu->arch.shadow_epcr = SPRN_EPCR_DSIGS | SPRN_EPCR_DGTMI | \
                                 SPRN_EPCR_DUVD;
#ifdef CONFIG_64BIT
        vcpu->arch.shadow_epcr |= SPRN_EPCR_ICM;
#endif
        vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_PMMP;

        vcpu->arch.pvr = mfspr(SPRN_PVR);
        vcpu_e500->svr = mfspr(SPRN_SVR);

        vcpu->arch.cpu_type = KVM_CPU_E500MC;

        return 0;
}

static int kvmppc_core_get_sregs_e500mc(struct kvm_vcpu *vcpu,
                                        struct kvm_sregs *sregs)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

        sregs->u.e.features |= KVM_SREGS_E_ARCH206_MMU | KVM_SREGS_E_PM |
                               KVM_SREGS_E_PC;
        sregs->u.e.impl_id = KVM_SREGS_E_IMPL_FSL;

        sregs->u.e.impl.fsl.features = 0;
        sregs->u.e.impl.fsl.svr = vcpu_e500->svr;
        sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
        sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;

        kvmppc_get_sregs_e500_tlb(vcpu, sregs);

        sregs->u.e.ivor_high[3] =
                vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
        sregs->u.e.ivor_high[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
        sregs->u.e.ivor_high[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];

        return kvmppc_get_sregs_ivor(vcpu, sregs);
}

static int kvmppc_core_set_sregs_e500mc(struct kvm_vcpu *vcpu,
                                        struct kvm_sregs *sregs)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int ret;

        if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
                vcpu_e500->svr = sregs->u.e.impl.fsl.svr;
                vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0;
                vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar;
        }

        ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs);
        if (ret < 0)
                return ret;

        if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
                return 0;

        if (sregs->u.e.features & KVM_SREGS_E_PM) {
                vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] =
                        sregs->u.e.ivor_high[3];
        }

        if (sregs->u.e.features & KVM_SREGS_E_PC) {
                vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] =
                        sregs->u.e.ivor_high[4];
                vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] =
                        sregs->u.e.ivor_high[5];
        }

        return kvmppc_set_sregs_ivor(vcpu, sregs);
}

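/*
 * ONE_REG accessors: SPRG9 is handled here, all other register ids are
 * passed on to the e500 TLB code.
 */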
static int kvmppc_get_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id,
                              union kvmppc_one_reg *val)
{
        int r = 0;

        switch (id) {
        case KVM_REG_PPC_SPRG9:
                *val = get_reg_val(id, vcpu->arch.sprg9);
                break;
        default:
                r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
        }

        return r;
}

static int kvmppc_set_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id,
                              union kvmppc_one_reg *val)
{
        int r = 0;

        switch (id) {
        case KVM_REG_PPC_SPRG9:
                vcpu->arch.sprg9 = set_reg_val(id, *val);
                break;
        default:
                r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val);
        }

        return r;
}

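/*
 * Allocate the per-vcpu state: the guest TLB tracking structures and the
 * page shared with the guest.  oldpir starts out invalid so that the
 * first vcpu_load flushes the shadow TLB.
 */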
static int kvmppc_core_vcpu_create_e500mc(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500;
        int err;

        BUILD_BUG_ON(offsetof(struct kvmppc_vcpu_e500, vcpu) != 0);
        vcpu_e500 = to_e500(vcpu);

        /* Invalid PIR value -- this LPID doesn't have valid state on any cpu */
        vcpu->arch.oldpir = 0xffffffff;

        err = kvmppc_e500_tlb_init(vcpu_e500);
        if (err)
                return err;

        vcpu->arch.shared = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
        if (!vcpu->arch.shared) {
                err = -ENOMEM;
                goto uninit_tlb;
        }

        return 0;

uninit_tlb:
        kvmppc_e500_tlb_uninit(vcpu_e500);
        return err;
}

static void kvmppc_core_vcpu_free_e500mc(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

        free_page((unsigned long)vcpu->arch.shared);
        kvmppc_e500_tlb_uninit(vcpu_e500);
}

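/* Allocate and record the LPID (or LPID pair) this VM will run under. */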
static int kvmppc_core_init_vm_e500mc(struct kvm *kvm)
{
        int lpid;

        lpid = kvmppc_alloc_lpid();
        if (lpid < 0)
                return lpid;

        /*
         * Use two lpids per VM on cores with two threads like e6500. Use
         * even numbers to speed up vcpu lpid computation with consecutive
         * lpids per VM. vm1 will use lpids 2 and 3, vm2 lpids 4 and 5,
         * and so on.
         */
        if (threads_per_core == 2)
                lpid <<= 1;

        kvm->arch.lpid = lpid;
        return 0;
}

static void kvmppc_core_destroy_vm_e500mc(struct kvm *kvm)
{
        int lpid = kvm->arch.lpid;

        if (threads_per_core == 2)
                lpid >>= 1;

        kvmppc_free_lpid(lpid);
}

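/* The kvmppc_ops instance registered by kvmppc_e500mc_init() below. */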
static struct kvmppc_ops kvm_ops_e500mc = {
        .get_sregs = kvmppc_core_get_sregs_e500mc,
        .set_sregs = kvmppc_core_set_sregs_e500mc,
        .get_one_reg = kvmppc_get_one_reg_e500mc,
        .set_one_reg = kvmppc_set_one_reg_e500mc,
        .vcpu_load   = kvmppc_core_vcpu_load_e500mc,
        .vcpu_put    = kvmppc_core_vcpu_put_e500mc,
        .vcpu_create = kvmppc_core_vcpu_create_e500mc,
        .vcpu_free   = kvmppc_core_vcpu_free_e500mc,
        .init_vm = kvmppc_core_init_vm_e500mc,
        .destroy_vm = kvmppc_core_destroy_vm_e500mc,
        .emulate_op = kvmppc_core_emulate_op_e500,
        .emulate_mtspr = kvmppc_core_emulate_mtspr_e500,
        .emulate_mfspr = kvmppc_core_emulate_mfspr_e500,
};

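/*
 * Module init: bring up the generic booke support, size the LPID
 * allocator (half the hardware LPIDs on dual-threaded cores, since each
 * VM consumes two), reserve LPID 0 for the host, and register with KVM.
 */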
static int __init kvmppc_e500mc_init(void)
{
        int r;

        r = kvmppc_booke_init();
        if (r)
                goto err_out;

        /*
         * Use two lpids per VM on dual-threaded processors like e6500
         * to work around the lack of a tlb write conditional instruction.
         * Expose half the number of available hardware lpids to the lpid
         * allocator.
         */
        kvmppc_init_lpid(KVMPPC_NR_LPIDS/threads_per_core);
        kvmppc_claim_lpid(0); /* host */

        r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
        if (r)
                goto err_out;
        kvm_ops_e500mc.owner = THIS_MODULE;
        kvmppc_pr_ops = &kvm_ops_e500mc;

err_out:
        return r;
}

static void __exit kvmppc_e500mc_exit(void)
{
        kvmppc_pr_ops = NULL;
        kvmppc_booke_exit();
}

module_init(kvmppc_e500mc_init);
module_exit(kvmppc_e500mc_exit);
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");