Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

arch/csky/abiv2/cacheflush.c (git blame: commit 8f3ce5b39, kx, 2023-10-28)
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/cache.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <asm/cache.h>

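/*
 * Called by the core MM code after a PTE for @address has been
 * installed in @vma's page tables.  The flush is lazy: PG_dcache_clean
 * is set the first time a page is mapped, so already-clean pages (and
 * the shared zero page) are skipped.  For executable mappings the
 * icache is also invalidated, so the CPU cannot keep fetching stale
 * instructions for the newly mapped page.
 */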
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *pte)
{
	unsigned long addr;
	struct page *page;

	page = pfn_to_page(pte_pfn(*pte));
	if (page == ZERO_PAGE(0))
		return;

	if (test_and_set_bit(PG_dcache_clean, &page->flags))
		return;

	addr = (unsigned long) kmap_atomic(page);

	dcache_wb_range(addr, addr + PAGE_SIZE);

	if (vma->vm_flags & VM_EXEC)
		icache_inv_range(addr, addr + PAGE_SIZE);

	kunmap_atomic((void *) addr);
}

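/*
 * Catch-up path for harts that skipped a cross-CPU icache flush:
 * invalidate this CPU's icache for @mm if flush_icache_mm_range() left
 * it marked stale.  In the upstream C-SKY port this runs from
 * switch_mm(), so a hart that was not executing @mm during the flush
 * is caught up as soon as it switches back in.
 */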
void flush_icache_deferred(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/*
		 * Ensure the remote hart's writes are visible to this hart.
		 * This pairs with a barrier in flush_icache_mm.
		 */
		smp_mb();
		local_icache_inv_all(NULL);
	}
}

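/*
 * Make instruction fetches for @mm coherent after code in [start, end)
 * has been modified.  The local icache is handled immediately; other
 * harts are either invalidated now via IPI (if they are running @mm)
 * or left marked stale in icache_stale_mask for
 * flush_icache_deferred() to handle on their next switch to @mm.
 */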
void flush_icache_mm_range(struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

#ifdef CONFIG_CPU_HAS_ICACHE_INS
	if (mm == current->mm) {
		icache_inv_range(start, end);
		preempt_enable();
		return;
	}
#endif

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);

	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_icache_inv_all(NULL);

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));

	if (mm != current->active_mm || !cpumask_empty(&others)) {
		on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1);
		cpumask_clear(mask);
	}

	preempt_enable();
}
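
/*
 * Caller-side sketch (illustrative only, not part of this file): the
 * deferred path assumes the context-switch code consults
 * icache_stale_mask whenever a hart starts running an mm.  A minimal
 * pairing, modeled loosely on the C-SKY switch_mm() (where
 * check_and_switch_context() stands for the arch's ASID bookkeeping),
 * might look like:
 *
 *	static inline void switch_mm(struct mm_struct *prev,
 *				     struct mm_struct *next,
 *				     struct task_struct *tsk)
 *	{
 *		if (prev != next)
 *			check_and_switch_context(next, smp_processor_id());
 *
 *		// Catch up if flush_icache_mm_range() marked this hart
 *		// stale while it was running another mm.
 *		flush_icache_deferred(next);
 *	}
 */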