Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

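Below is the s390 TLB flush header shipped in this tree (arch/s390/include/asm/tlbflush.h):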
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
	asm volatile("ptlb" : : : "memory");
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	unsigned long opt;

	opt = IDTE_PTOA;
	if (MACHINE_HAS_TLB_GUEST)
		opt |= IDTE_GUEST_ASCE;
	/* Global TLB flush for the mm */
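	/* The .insn below emits IDTE (invalidate DAT table entry, opcode 0xb98e) */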
	asm volatile(
		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
		: : "a" (opt), "a" (asce) : "cc");
}

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
	unsigned int dummy = 0;

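	/* csp (compare and swap and purge) clears the TLBs of all CPUs */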
	csp(&dummy, 0, 0);
}

/*
 * Flush TLB entries for a specific mm on all CPUs (if gmap is used
 * this involves multiple ASCEs!).
 */
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	unsigned long gmap_asce;

	/*
	 * If the machine has IDTE we prefer to do a per-mm flush
	 * on all CPUs instead of a local flush, even if the mm
	 * only ran on the local CPU.
	 */
	preempt_disable();
	atomic_inc(&mm->context.flush_count);
	/* Reset TLB flush mask */
	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
	barrier();
	gmap_asce = READ_ONCE(mm->context.gmap_asce);
	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
		if (gmap_asce)
			__tlb_flush_idte(gmap_asce);
		__tlb_flush_idte(mm->context.asce);
	} else {
		/* Global TLB flush */
		__tlb_flush_global();
	}
	atomic_dec(&mm->context.flush_count);
	preempt_enable();
}

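/*
 * Flush all TLB entries for the kernel address space.
 */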
static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(init_mm.context.asce);
	else
		__tlb_flush_global();
}

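/*
 * Flush the mm only if a flush was deferred (mm->context.flush_mm is set).
 */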
static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
	spin_lock(&mm->context.lock);
	if (mm->context.flush_mm) {
		mm->context.flush_mm = 0;
		__tlb_flush_mm(mm);
	}
	spin_unlock(&mm->context.lock);
}

/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes' TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context's TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 */
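/*
 * flush_tlb(), flush_tlb_all() and flush_tlb_page() are no-ops here;
 * single-page invalidation is done directly by the pte/pmd helpers.
 */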
#define flush_tlb()				do { } while (0)
#define flush_tlb_all()				do { } while (0)
#define flush_tlb_page(vma, addr)		do { } while (0)

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_lazy(mm);
}

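/* start/end are ignored; the whole mm is flushed */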
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_lazy(vma->vm_mm);
}

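/* start/end are ignored; all kernel address-space entries are flushed */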
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_kernel();
}

#endif /* _S390_TLBFLUSH_H */
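
The comment block above describes a deferred protocol: ptep_set_wrprotect() and ptep_get_and_clear() may skip the TLB flush while the mm has a single user, and the caller issues one batched flush afterwards. A minimal caller-side sketch of that pattern follows; example_wrprotect_range() is a hypothetical name, and only flush_tlb_range() comes from the header above.

#include <linux/mm.h>
#include <asm/tlbflush.h>

/*
 * Hypothetical illustration of the deferred-flush pattern: update the
 * ptes of a VMA range first, then issue a single batched flush.
 */
static void example_wrprotect_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end)
{
	/*
	 * ... walk the page tables for [start, end) and call
	 * ptep_set_wrprotect() on each present pte; the helpers may
	 * defer the flush by marking mm->context.flush_mm instead of
	 * flushing immediately ...
	 */

	/* One deferred flush; on s390 this reaches __tlb_flush_mm_lazy(). */
	flush_tlb_range(vma, start, end);
}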