Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/mm.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/*
 * When necessary, performs a deferred icache flush for the given MM context,
 * on the local CPU.  RISC-V has no direct mechanism for instruction cache
 * shoot downs, so instead we send an IPI that informs the remote harts they
 * need to flush their local instruction caches.  To avoid pathologically slow
 * behavior in a common case (a bunch of single-hart processes on a many-hart
 * machine, i.e. 'make -j') we avoid the IPIs for harts that are not currently
 * executing an MM context and instead schedule a deferred local instruction
 * cache flush to be performed before execution resumes on each hart.  This
 * actually performs that local instruction cache flush, which implicitly only
 * refers to the current hart.
 */
static inline void flush_icache_deferred(struct mm_struct *mm)
{
#ifdef CONFIG_SMP
	unsigned int cpu = smp_processor_id();
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/*
		 * Ensure the remote hart's writes are visible to this hart.
		 * This pairs with a barrier in flush_icache_mm.
		 */
		smp_mb();
		local_flush_icache_all();
	}

#endif
}
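
The comment above flush_icache_deferred() describes a two-sided protocol: the flushing side marks every hart's icache as stale for the mm and only interrupts harts currently executing it, while any hart that picks the mm up later performs the flush lazily, right here, before the mm runs again on that hart. As a rough illustration of the bookkeeping only, here is a tiny stand-alone user-space model in plain C; every name in it (NR_HARTS, running_mask, the model_* functions) is invented for this sketch, and it has no real icache, IPI, or memory-ordering handling.

/*
 * Toy, single-threaded model of the stale-mask bookkeeping described in the
 * comment above flush_icache_deferred().  All names here are invented for
 * illustration; there is no real icache, IPI or memory ordering involved.
 */
#include <stdio.h>

#define NR_HARTS 4

static unsigned long stale_mask;    /* harts whose icache is stale for this mm */
static unsigned long running_mask;  /* harts currently executing this mm */

static void local_icache_flush(int hart)
{
	printf("hart %d: fence.i (local icache flush)\n", hart);
}

/* Flushing side: new code was written, so every hart's icache goes stale. */
static void model_flush_icache_mm(int this_hart)
{
	stale_mask = (1UL << NR_HARTS) - 1;

	/* Flush this hart immediately and mark it clean. */
	stale_mask &= ~(1UL << this_hart);
	local_icache_flush(this_hart);

	/* Only harts already running the mm get an immediate "IPI". */
	for (int hart = 0; hart < NR_HARTS; hart++) {
		if (hart == this_hart || !(running_mask & (1UL << hart)))
			continue;
		stale_mask &= ~(1UL << hart);
		local_icache_flush(hart);   /* stands in for the IPI */
	}
}

/* Scheduling side: what flush_icache_deferred() does at switch_mm() time. */
static void model_switch_in(int hart)
{
	running_mask |= 1UL << hart;
	if (stale_mask & (1UL << hart)) {
		stale_mask &= ~(1UL << hart);
		local_icache_flush(hart);   /* the deferred flush */
	}
}

int main(void)
{
	model_switch_in(0);         /* hart 0 starts running the mm */
	model_flush_icache_mm(0);   /* hart 0 modifies code in the mm */
	model_switch_in(2);         /* hart 2 picks the mm up later: deferred flush */
	model_switch_in(2);         /* already clean: nothing to do */
	return 0;
}

The real code additionally needs the smp_mb() visible in flush_icache_deferred(), which pairs with a barrier on the flushing side so that the modified instructions and the stale-mask update are visible to a hart before it decides whether to flush.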

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	struct task_struct *task)
{
	unsigned int cpu;

	if (unlikely(prev == next))
		return;

	/*
	 * Mark the current MM context as inactive, and the next as
	 * active.  This is at least used by the icache flushing
	 * routines in order to determine who should be flushed.
	 */
	cpu = smp_processor_id();

	cpumask_clear_cpu(cpu, mm_cpumask(prev));
	cpumask_set_cpu(cpu, mm_cpumask(next));

#ifdef CONFIG_MMU
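	/*
	 * Point satp at the next mm's root page table: the PPN of its pgd
	 * plus the paging-mode bits.  No ASID is written in this path, so
	 * the whole local TLB is invalidated after the switch.
	 */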
	csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE);
	local_flush_tlb_all();
#endif

	flush_icache_deferred(next);
}
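
For reference on the csr_write() above: on RV64 the satp CSR holds the physical page number of the root page table in its low bits, the ASID in bits 59:44 (left at zero here), and the paging mode in bits 63:60, where 8 selects Sv39. A small stand-alone sketch of how such a value is composed; make_satp() and the MODEL_* constants are invented for this example and are not kernel definitions.

/*
 * Stand-alone illustration of the satp layout used by the csr_write()
 * above (RV64, Sv39, ASID left at 0), per the RISC-V privileged spec.
 */
#include <stdint.h>
#include <stdio.h>

#define MODEL_PAGE_SHIFT	12
#define MODEL_SATP_MODE_SV39	(8ULL << 60)	/* satp.MODE = 8 -> Sv39 */

static uint64_t make_satp(uint64_t pgd_phys)
{
	/* PPN of the root page table in the low bits, mode bits on top. */
	return (pgd_phys >> MODEL_PAGE_SHIFT) | MODEL_SATP_MODE_SV39;
}

int main(void)
{
	uint64_t pgd_phys = 0x80201000ULL;	/* example physical address of a pgd */

	printf("satp = 0x%016llx\n", (unsigned long long)make_satp(pgd_phys));
	return 0;
}

In the kernel path, virt_to_pfn(next->pgd) produces the same PPN from the pgd's kernel virtual address, and SATP_MODE supplies the mode bits chosen at build time.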