Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) #ifndef _ASM_IA64_TLBFLUSH_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3) #define _ASM_IA64_TLBFLUSH_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * Copyright (C) 2002 Hewlett-Packard Co
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  *	David Mosberger-Tang <davidm@hpl.hp.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <asm/intrinsics.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <asm/mmu_context.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <asm/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) 
/*
 * Saved image of one translation register (TR) entry, recorded when a
 * pinned mapping is inserted so it can be purged/re-inserted later.
 */
struct ia64_tr_entry {
	u64 ifa;	/* virtual address the entry maps */
	u64 itir;	/* itir value: page size / key bits */
	u64 pte;	/* the translation (PTE) itself */
	u64 rr;		/* region register covering ifa */
}; /* one saved TR entry */

/* Insert a pinned itr/dtr mapping; presumably returns the slot index used
 * or a negative value on failure -- confirm against arch/ia64/kernel/tlb.c. */
extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
/* Purge the TR entry previously installed in @slot. */
extern void ia64_ptr_entry(u64 target_mask, int slot);
/* Per-CPU table of saved instruction/data TR entries. */
extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 
/*
 * Region register (rr) field accessors.
 *
 * Field layout:
 *   bit  0    : ve  -- VHPT walker enable for the region
 *   bits 2-7  : ps  -- preferred page size (log2)
 *   bits 8-31 : rid -- region id
 *
 * Note: RR_TO_RID previously expanded its argument without parentheses
 * ((val >> 8) instead of (((val) >> 8))), which miscomputes for
 * compound-expression arguments such as RR_TO_RID(a | b); fixed here.
 */
#define RR_TO_VE(val)   (((val) >> 0) & 0x0000000000000001)
#define RR_VE(val)     (((val) & 0x0000000000000001) << 0)
#define RR_VE_MASK     0x0000000000000001L
#define RR_VE_SHIFT    0
#define RR_TO_PS(val)  (((val) >> 2) & 0x000000000000003f)
#define RR_PS(val)     (((val) & 0x000000000000003f) << 2)
#define RR_PS_MASK     0x00000000000000fcL
#define RR_PS_SHIFT    2
#define RR_RID_MASK    0x00000000ffffff00L
#define RR_TO_RID(val)         (((val) >> 8) & 0xffffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 
/*
 * Now for some TLB flushing routines.  This is the kind of stuff that
 * can be very expensive, so try to avoid them whenever possible.
 */
/* Configure the global-purge (ptc.g) serialization; presumably max_purges
 * comes from PAL/SAL -- confirm in arch/ia64/kernel/tlb.c. */
extern void setup_ptcg_sem(int max_purges, int from_palo);

/*
 * Flush everything (kernel mapping may also have changed due to
 * vmalloc/vfree).
 */
extern void local_flush_tlb_all (void);

#ifdef CONFIG_SMP
  extern void smp_flush_tlb_all (void);
  extern void smp_flush_tlb_mm (struct mm_struct *mm);
  extern void smp_flush_tlb_cpumask (cpumask_t xcpumask);
# define flush_tlb_all()	smp_flush_tlb_all()
#else
/* UP: a "global" flush degrades to the purely local one. */
# define flush_tlb_all()	local_flush_tlb_all()
# define smp_flush_tlb_cpumask(m) local_flush_tlb_all()
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) local_finish_flush_tlb_mm (struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 	if (mm == current->active_mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 		activate_context(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72)  * Flush a specified user mapping.  This is called, e.g., as a result of fork() and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73)  * exit().  fork() ends up here because the copy-on-write mechanism needs to write-protect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74)  * the PTEs of the parent task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) flush_tlb_mm (struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 	if (!mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 	set_bit(mm->context, ia64_ctx.flushmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 	mm->context = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 	if (atomic_read(&mm->mm_users) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 		return;		/* happens as a result of exit_mmap() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 	smp_flush_tlb_mm(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 	local_finish_flush_tlb_mm(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98)  * Page-granular tlb flush.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 	if (vma->vm_mm == current->active_mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 		ia64_ptcl(addr, (PAGE_SHIFT << 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 		vma->vm_mm->context = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 
/*
 * Flush the local TLB. Invoked from another cpu using an IPI.
 */
#ifdef CONFIG_SMP
void smp_local_flush_tlb(void);
#else
/* UP: no cross-CPU shootdowns exist, so this is a no-op. */
#define smp_local_flush_tlb()
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 
/*
 * Flush kernel-space mappings in [start, end).  The range is currently
 * ignored and the whole TLB is purged instead (over-flushing is safe,
 * just slow -- marked "XXX fix me" since the original).
 */
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	(void) start;	/* XXX fix me: range ignored, */
	(void) end;	/* everything is flushed */
	flush_tlb_all();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) #endif /* _ASM_IA64_TLBFLUSH_H */