Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3)  * Copyright (C) 2017 SiFive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6) #include <asm/cacheflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <asm/sbi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) 
/*
 * IPI callback: flush this hart's instruction cache.
 *
 * @info is unused; the parameter exists only to match the
 * smp_call_func_t signature expected by on_each_cpu{,_mask}().
 *
 * Note: the original `return local_flush_icache_all();` returned a void
 * expression from a void function — a C constraint violation (C11
 * 6.8.6.4) tolerated only as a GNU extension.  Call and return plainly.
 */
static void ipi_remote_fence_i(void *info)
{
	local_flush_icache_all();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) void flush_icache_all(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) 	local_flush_icache_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) 	if (IS_ENABLED(CONFIG_RISCV_SBI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) 		sbi_remote_fence_i(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) 		on_each_cpu(ipi_remote_fence_i, NULL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) EXPORT_SYMBOL(flush_icache_all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)  * Performs an icache flush for the given MM context.  RISC-V has no direct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)  * mechanism for instruction cache shoot downs, so instead we send an IPI that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31)  * informs the remote harts they need to flush their local instruction caches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32)  * To avoid pathologically slow behavior in a common case (a bunch of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33)  * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34)  * IPIs for harts that are not currently executing a MM context and instead
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35)  * schedule a deferred local instruction cache flush to be performed before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)  * execution resumes on each hart.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)  */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	/*
	 * Pin this task to the current hart so "cpu" and the mm_cpumask()
	 * snapshot below stay coherent until preempt_enable().
	 */
	preempt_disable();

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);
	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_flush_icache_all();

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	/* If no other hart runs this MM, the local flush above suffices. */
	local |= cpumask_empty(&others);
	if (mm == current->active_mm && local) {
		/*
		 * It's assumed that at least one strongly ordered operation is
		 * performed on this hart between setting a hart's cpumask bit
		 * and scheduling this MM context on that hart.  Sending an SBI
		 * remote message will do this, but in the case where no
		 * messages are sent we still need to order this hart's writes
		 * with flush_icache_deferred().
		 */
		smp_mb();
	} else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
		/*
		 * SBI path: translate logical CPU ids in "others" to hart
		 * ids, then have firmware issue FENCE.I on those harts.
		 */
		cpumask_t hartid_mask;

		riscv_cpuid_to_hartid_mask(&others, &hartid_mask);
		sbi_remote_fence_i(cpumask_bits(&hartid_mask));
	} else {
		/* No SBI: IPI the remote harts to flush their own I$. */
		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
	}

	preempt_enable();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) #endif /* CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) #ifdef CONFIG_MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) void flush_icache_pte(pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) 	struct page *page = pte_page(pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) 		flush_icache_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) #endif /* CONFIG_MMU */