Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /**************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  * All Rights Reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * Permission is hereby granted, free of charge, to any person obtaining a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * copy of this software and associated documentation files (the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * "Software"), to deal in the Software without restriction, including
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  * without limitation the rights to use, copy, modify, merge, publish,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  * distribute, sub license, and/or sell copies of the Software, and to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  * permit persons to whom the Software is furnished to do so, subject to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  * the following conditions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  * The above copyright notice and this permission notice (including the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15)  * next paragraph) shall be included in all copies or substantial portions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16)  * of the Software.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18)  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19)  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20)  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21)  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22)  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23)  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24)  * USE OR OTHER DEALINGS IN THE SOFTWARE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26)  **************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28)  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) #include <linux/highmem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) #include <drm/drm_cache.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) #if defined(CONFIG_X86)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) #include <asm/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40)  * clflushopt is an unordered instruction which needs fencing with mfence or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41)  * sfence to avoid ordering issues.  For drm_clflush_page this fencing happens
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42)  * in the caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) drm_clflush_page(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 	uint8_t *page_virtual;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 	const int size = boot_cpu_data.x86_clflush_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 	if (unlikely(page == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 	page_virtual = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 	for (i = 0; i < PAGE_SIZE; i += size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 		clflushopt(page_virtual + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 	kunmap_atomic(page_virtual);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 
static void drm_cache_flush_clflush(struct page *pages[],
				    unsigned long num_pages)
{
	unsigned long idx;

	/* Fence before: clflushopt is unordered w.r.t. prior stores. */
	mb();
	for (idx = 0; idx < num_pages; idx++)
		drm_clflush_page(pages[idx]);
	/* Fence after: guarantee all flushes have completed. */
	mb();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 
/**
 * drm_clflush_pages - Flush dcache lines of a set of pages.
 * @pages: List of pages to be flushed.
 * @num_pages: Number of pages in the array.
 *
 * Flush every data cache line entry that points to an address belonging
 * to a page in the array.
 *
 * On x86 this prefers per-line CLFLUSH; if the CPU lacks that feature it
 * falls back to a full cache writeback+invalidate on every CPU (wbinvd).
 * On powerpc each page is mapped and flushed by virtual address range.
 * Other architectures are unsupported and only emit a one-time warning.
 */
void
drm_clflush_pages(struct page *pages[], unsigned long num_pages)
{

#if defined(CONFIG_X86)
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		drm_cache_flush_clflush(pages, num_pages);
		return;
	}

	/* No CLFLUSH: flush the entire cache hierarchy on all CPUs. */
	if (wbinvd_on_all_cpus())
		pr_err("Timed out waiting for cache flush\n");

#elif defined(__powerpc__)
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		struct page *page = pages[i];
		void *page_virtual;

		/* NULL entries in the array are silently skipped. */
		if (unlikely(page == NULL))
			continue;

		page_virtual = kmap_atomic(page);
		flush_dcache_range((unsigned long)page_virtual,
				   (unsigned long)page_virtual + PAGE_SIZE);
		kunmap_atomic(page_virtual);
	}
#else
	pr_err("Architecture has no drm_cache.c support\n");
	WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 
/**
 * drm_clflush_sg - Flush dcache lines pointing to a scatter-gather list.
 * @st: struct sg_table.
 *
 * Flush every data cache line entry that points to an address in the
 * sg.
 *
 * x86 only: uses per-page CLFLUSH when available, otherwise falls back
 * to wbinvd on all CPUs. Non-x86 architectures only warn.
 */
void
drm_clflush_sg(struct sg_table *st)
{
#if defined(CONFIG_X86)
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		struct sg_page_iter sg_iter;

		mb(); /*CLFLUSH is ordered only by using memory barriers*/
		for_each_sgtable_page(st, &sg_iter, 0)
			drm_clflush_page(sg_page_iter_page(&sg_iter));
		mb(); /*Make sure that all cache line entry is flushed*/

		return;
	}

	/* No CLFLUSH: flush the entire cache hierarchy on all CPUs. */
	if (wbinvd_on_all_cpus())
		pr_err("Timed out waiting for cache flush\n");
#else
	pr_err("Architecture has no drm_cache.c support\n");
	WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 
/**
 * drm_clflush_virt_range - Flush dcache lines of a region
 * @addr: Initial kernel memory address.
 * @length: Region size.
 *
 * Flush every data cache line entry that points to an address in the
 * region requested.
 *
 * x86 only: uses CLFLUSH per cache line when available, otherwise falls
 * back to wbinvd on all CPUs. Non-x86 architectures only warn.
 */
void
drm_clflush_virt_range(void *addr, unsigned long length)
{
#if defined(CONFIG_X86)
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		const int size = boot_cpu_data.x86_clflush_size;
		void *end = addr + length;

		/*
		 * Round the start address down to a cache-line boundary
		 * (size is a power of two, so -size is the alignment mask).
		 */
		addr = (void *)(((unsigned long)addr) & -size);
		mb(); /*CLFLUSH is only ordered with a full memory barrier*/
		for (; addr < end; addr += size)
			clflushopt(addr);
		clflushopt(end - 1); /* force serialisation */
		mb(); /*Ensure that every data cache line entry is flushed*/
		return;
	}

	/* No CLFLUSH: flush the entire cache hierarchy on all CPUs. */
	if (wbinvd_on_all_cpus())
		pr_err("Timed out waiting for cache flush\n");
#else
	pr_err("Architecture has no drm_cache.c support\n");
	WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_virt_range);