Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

arch/arm64/include/asm/cacheflush.h (git blame: all lines from commit ^8f3ce5b39, kx, 2023-10-28 12:00:06 +0300)

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/cacheflush.h
 *
 * Copyright (C) 1999-2002 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H

#include <linux/kgdb.h>
#include <linux/mm.h>

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1
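
/*
 * Illustrative sketch, not part of the original header: arm64 tests and
 * sets this bit when a page is first mapped executable, roughly as the
 * 5.10 fault path does in __sync_icache_dcache() (arch/arm64/mm/flush.c):
 *
 *	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 *		sync_icache_aliases(page_address(page), page_size(page));
 *
 * A page that already has the bit set needs no further maintenance.
 */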

/*
 *	MM Cache Management
 *	===================
 *
 *	These methods are implemented in arch/arm64/mm/cache.S.
 *
 *	Start addresses are inclusive and end addresses are exclusive; start
 *	addresses should be rounded down, end addresses up.
 *
 *	See Documentation/core-api/cachetlb.rst for more information. Please note that
 *	the implementation assumes non-aliasing VIPT D-cache and (aliasing)
 *	VIPT I-cache.
 *
 *	flush_cache_mm(mm)
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_icache_range(start, end)
 *
 *		Ensure coherency between the I-cache and the D-cache in the
 *		region described by start, end.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	invalidate_icache_range(start, end)
 *
 *		Invalidate the I-cache in the region described by start, end.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	__flush_cache_user_range(start, end)
 *
 *		Ensure coherency between the I-cache and the D-cache in the
 *		region described by start, end.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	__flush_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in the page is written back.
 *		- kaddr  - page address
 *		- size   - region size
 */
extern void __flush_icache_range(unsigned long start, unsigned long end);
extern int  invalidate_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
extern void __inval_dcache_area(void *addr, size_t len);
extern void __clean_dcache_area_poc(void *addr, size_t len);
extern void __clean_dcache_area_pop(void *addr, size_t len);
extern void __clean_dcache_area_pou(void *addr, size_t len);
extern long __flush_cache_user_range(unsigned long start, unsigned long end);
extern void sync_icache_aliases(void *kaddr, unsigned long len);
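
/*
 * Illustrative sketch, not part of the original header: a caller that has
 * filled a kernel buffer which a non-coherent observer will read can push
 * it out to the point of coherency per the semantics documented above:
 *
 *	__flush_dcache_area(buf, len);
 *
 * 'buf' and 'len' are hypothetical names used only for illustration.
 */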

static inline void flush_icache_range(unsigned long start, unsigned long end)
{
	__flush_icache_range(start, end);

	/*
	 * IPI all online CPUs so that they undergo a context synchronization
	 * event and are forced to refetch the new instructions.
	 */

	/*
	 * KGDB performs cache maintenance with interrupts disabled, so we
	 * will deadlock trying to IPI the secondary CPUs. In theory, we can
	 * set CACHE_FLUSH_IS_SAFE to 0 to avoid this known issue, but that
	 * just means that KGDB will elide the maintenance altogether! As it
	 * turns out, KGDB uses IPIs to round up the secondary CPUs during
	 * the patching operation, so we don't need extra IPIs here anyway.
	 * In which case, add a KGDB-specific bodge and return early.
	 */
	if (in_dbg_master())
		return;

	kick_all_cpus_sync();
}
#define flush_icache_range flush_icache_range
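
/*
 * Illustrative sketch, not part of the original header: after writing new
 * instructions into executable memory, a caller makes them visible to
 * instruction fetch along the lines of:
 *
 *	memcpy(dst, insns, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 *
 * 'dst', 'insns' and 'len' are hypothetical names used only for
 * illustration.
 */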

/*
 * Cache maintenance functions used by the DMA API. Not to be used directly.
 */
extern void __dma_map_area(const void *, size_t, int);
extern void __dma_unmap_area(const void *, size_t, int);
extern void __dma_flush_area(const void *, size_t);
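
/*
 * Illustrative sketch, not part of the original header: these helpers are
 * reached through the DMA mapping layer's arch hooks rather than called
 * directly; in 5.10 the arm64 hook looks roughly like this
 * (arch/arm64/mm/dma-mapping.c):
 *
 *	void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
 *				      enum dma_data_direction dir)
 *	{
 *		__dma_map_area(phys_to_virt(paddr), size, dir);
 *	}
 */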

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_to_user_page copy_to_user_page
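
/*
 * Illustrative sketch, not part of the original header: the generic
 * remote-access path (e.g. ptrace writing another task's text) invokes
 * this roughly as mm/memory.c does:
 *
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
 *
 * which both copies the data and keeps the target mapping's I-cache and
 * D-cache coherent.
 */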

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (i.e., page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
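
/*
 * Illustrative sketch, not part of the original header: a driver or
 * filesystem that writes to a page cache page through a kernel mapping
 * typically follows this pattern:
 *
 *	void *dst = kmap_atomic(page);
 *	memcpy(dst, src, len);
 *	kunmap_atomic(dst);
 *	flush_dcache_page(page);
 *
 * 'src' and 'len' are hypothetical names used only for illustration.
 */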

static __always_inline void __flush_icache_all(void)
{
	if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
		return;

	asm("ic	ialluis");
	dsb(ish);
}

int set_memory_valid(unsigned long addr, int numpages, int enable);

int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);

#include <asm-generic/cacheflush.h>

#endif /* __ASM_CACHEFLUSH_H */