Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) #define pr_fmt(fmt)  "Hyper-V: " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3) #include <linux/hyperv.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4) #include <linux/log2.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <asm/fpu/api.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <asm/mshyperv.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <asm/msr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <asm/tlbflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <asm/tlb.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #define CREATE_TRACE_POINTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <asm/trace/hyperv.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) /* Each gva in gva_list encodes up to 4096 pages to flush */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) 				      const struct flush_tlb_info *info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24)  * Fills in gva_list starting from offset. Returns the number of items added.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) static inline int fill_gva_list(u64 gva_list[], int offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 				unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 	int gva_n = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 	unsigned long cur = start, diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 		diff = end > cur ? end - cur : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 		gva_list[gva_n] = cur & PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 		 * Lower 12 bits encode the number of additional
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 		 * pages to flush (in addition to the 'cur' page).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 		if (diff >= HV_TLB_FLUSH_UNIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 			gva_list[gva_n] |= ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 			cur += HV_TLB_FLUSH_UNIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 		}  else if (diff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 			gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 			cur = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 		gva_n++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 	} while (cur < end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 	return gva_n - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) static void hyperv_flush_tlb_others(const struct cpumask *cpus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 				    const struct flush_tlb_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 	int cpu, vcpu, gva_n, max_gvas;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 	struct hv_tlb_flush **flush_pcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 	struct hv_tlb_flush *flush;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 	u64 status = U64_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 	trace_hyperv_mmu_flush_tlb_others(cpus, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 	if (!hv_hypercall_pg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 		goto do_native;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 	local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 	 * Only check the mask _after_ interrupt has been disabled to avoid the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 	 * mask changing under our feet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 	if (cpumask_empty(cpus)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 		local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 	flush_pcpu = (struct hv_tlb_flush **)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 		     this_cpu_ptr(hyperv_pcpu_input_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 	flush = *flush_pcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 	if (unlikely(!flush)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 		local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 		goto do_native;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 	if (info->mm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 		 * AddressSpace argument must match the CR3 with PCID bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 		 * stripped out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 		flush->address_space = virt_to_phys(info->mm->pgd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 		flush->address_space &= CR3_ADDR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 		flush->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 		flush->address_space = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 	flush->processor_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	if (cpumask_equal(cpus, cpu_present_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 		flush->flags |= HV_FLUSH_ALL_PROCESSORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 		 * From the supplied CPU set we need to figure out if we can get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 		 * away with cheaper HVCALL_FLUSH_VIRTUAL_ADDRESS_{LIST,SPACE}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 		 * hypercalls. This is possible when the highest VP number in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 		 * the set is < 64. As VP numbers are usually in ascending order
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 		 * and match Linux CPU ids, here is an optimization: we check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 		 * the VP number for the highest bit in the supplied set first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 		 * so we can quickly find out if using *_EX hypercalls is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 		 * must. We will also check all VP numbers when walking the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 		 * supplied CPU set to remain correct in all cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 		if (hv_cpu_number_to_vp_number(cpumask_last(cpus)) >= 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 			goto do_ex_hypercall;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 		for_each_cpu(cpu, cpus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 			vcpu = hv_cpu_number_to_vp_number(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 			if (vcpu == VP_INVAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 				local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 				goto do_native;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 			if (vcpu >= 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 				goto do_ex_hypercall;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 			__set_bit(vcpu, (unsigned long *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 				  &flush->processor_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 	 * We can flush not more than max_gvas with one hypercall. Flush the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 	 * whole address space if we were asked to do more.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 	max_gvas = (PAGE_SIZE - sizeof(*flush)) / sizeof(flush->gva_list[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 	if (info->end == TLB_FLUSH_ALL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 		flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 		status = hv_do_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 					 flush, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 	} else if (info->end &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 		   ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 		status = hv_do_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 					 flush, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 		gva_n = fill_gva_list(flush->gva_list, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 				      info->start, info->end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 		status = hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 					     gva_n, 0, flush, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 	goto check_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) do_ex_hypercall:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 	status = hyperv_flush_tlb_others_ex(cpus, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) check_status:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 	if (!(status & HV_HYPERCALL_RESULT_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) do_native:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 	native_flush_tlb_others(cpus, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 				      const struct flush_tlb_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 	int nr_bank = 0, max_gvas, gva_n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 	struct hv_tlb_flush_ex **flush_pcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 	struct hv_tlb_flush_ex *flush;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 	u64 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 	if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 		return U64_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 	flush_pcpu = (struct hv_tlb_flush_ex **)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 		     this_cpu_ptr(hyperv_pcpu_input_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 	flush = *flush_pcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 	if (info->mm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 		 * AddressSpace argument must match the CR3 with PCID bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 		 * stripped out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 		flush->address_space = virt_to_phys(info->mm->pgd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 		flush->address_space &= CR3_ADDR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 		flush->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 		flush->address_space = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 	flush->hv_vp_set.valid_bank_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 	flush->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 	nr_bank = cpumask_to_vpset(&(flush->hv_vp_set), cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 	if (nr_bank < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 		return U64_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 	 * We can flush not more than max_gvas with one hypercall. Flush the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 	 * whole address space if we were asked to do more.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 	max_gvas =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 		(PAGE_SIZE - sizeof(*flush) - nr_bank *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 		 sizeof(flush->hv_vp_set.bank_contents[0])) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 		sizeof(flush->gva_list[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 	if (info->end == TLB_FLUSH_ALL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 		flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 		status = hv_do_rep_hypercall(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 			HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 			0, nr_bank, flush, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 	} else if (info->end &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 		   ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 		status = hv_do_rep_hypercall(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 			HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 			0, nr_bank, flush, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 		gva_n = fill_gva_list(flush->gva_list, nr_bank,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 				      info->start, info->end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 		status = hv_do_rep_hypercall(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 			HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 			gva_n, nr_bank, flush, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) void hyperv_setup_mmu_ops(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 	if (!(ms_hyperv.hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 	pr_info("Using hypercall for remote TLB flush\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 	pv_ops.mmu.flush_tlb_others = hyperv_flush_tlb_others;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 	pv_ops.mmu.tlb_remove_table = tlb_remove_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) }