Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

arch/arm/mm/copypage-v6.c @ commit 8f3ce5b39
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/copypage-v6.c
 *
 *  Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/shmparam.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>

#include "mm.h"

#if SHMLBA > 16384
#error FIX ME
#endif
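
/*
 * CACHE_COLOUR() (defined in mm.h) reduces a virtual address to its
 * cache colour, roughly ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT), so with
 * SHMLBA == 16384 and 4 KiB pages there are at most four colours.  The
 * aliasing helpers below reserve one fixed mapping slot per colour, and
 * a larger SHMLBA would overrun that window, hence the guard above.
 */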

static DEFINE_RAW_SPINLOCK(v6_lock);
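
/*
 * v6_lock serialises all users of the shared per-colour mapping slots
 * below; it is a raw spinlock, presumably so these paths remain valid
 * even on kernels where ordinary spinlocks may sleep (e.g. PREEMPT_RT).
 */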

/*
 * Copy the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of these pages.
 */
static void v6_copy_user_highpage_nonaliasing(struct page *to,
	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kfrom = kmap_atomic(from);
	kto = kmap_atomic(to);
	copy_page(kto, kfrom);
	kunmap_atomic(kto);
	kunmap_atomic(kfrom);
}
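
/*
 * Note that kmap_atomic() gives the page a temporary kernel mapping when
 * it lives in highmem, so this non-aliasing pair is highmem safe, unlike
 * the aliasing variants below (see their "not highmem safe" FIXMEs).
 */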

/*
 * Clear the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of this page.
 */
static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page);
	clear_page(kaddr);
	kunmap_atomic(kaddr);
}

/*
 * Discard data in the kernel mapping for the new page.
 * FIXME: needs this MCRR to be supported.
 */
static void discard_old_kernel_data(void *kto)
{
	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
	   :
	   : "r" (kto),
	     "r" ((unsigned long)kto + PAGE_SIZE - 1)
	   : "cc");
}
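
/*
 * As far as I can tell, the MCRR above is the ARMv6 CP15 "invalidate
 * data cache range" operation, with %0 the start and %1 the inclusive
 * end address of the destination page.  Invalidating without writeback
 * discards any stale cache lines for the kernel alias of the new page,
 * so they cannot be evicted on top of the data copied in below.
 */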

/*
 * Copy the page, taking account of the cache colour.
 */
static void v6_copy_user_highpage_aliasing(struct page *to,
	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned int offset = CACHE_COLOUR(vaddr);
	unsigned long kfrom, kto;

	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping_file(from), from);

	/* FIXME: not highmem safe */
	discard_old_kernel_data(page_address(to));

	/*
	 * Now copy the page using the same cache colour as the
	 * page's ultimate destination.
	 */
	raw_spin_lock(&v6_lock);

	kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT);
	kto   = COPYPAGE_V6_TO + (offset << PAGE_SHIFT);

	set_top_pte(kfrom, mk_pte(from, PAGE_KERNEL));
	set_top_pte(kto, mk_pte(to, PAGE_KERNEL));

	copy_page((void *)kto, (void *)kfrom);

	raw_spin_unlock(&v6_lock);
}
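
/*
 * The idea above: both pages are temporarily remapped at fixed slots
 * whose low address bits share the user address's cache colour, so the
 * kernel-side accesses hit the same cache sets the user mapping will
 * use and the copy leaves no dirty lines under a different alias.  The
 * copy_page() itself must stay inside the lock because the slots are
 * global (one per colour), not per-CPU.
 */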

/*
 * Clear the user page.  We need to deal with the aliasing issues,
 * so remap the kernel page into the same cache colour as the user
 * page.
 */
static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
{
	unsigned long to = COPYPAGE_V6_TO + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);

	/* FIXME: not highmem safe */
	discard_old_kernel_data(page_address(page));

	/*
	 * Now clear the page using the same cache colour as
	 * the page's ultimate destination.
	 */
	raw_spin_lock(&v6_lock);

	set_top_pte(to, mk_pte(page, PAGE_KERNEL));
	clear_page((void *)to);

	raw_spin_unlock(&v6_lock);
}

struct cpu_user_fns v6_user_fns __initdata = {
	.cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
	.cpu_copy_user_highpage	= v6_copy_user_highpage_nonaliasing,
};
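
/*
 * v6_user_fns can live in __initdata because, if I read the boot path
 * correctly, its contents are copied into the global cpu_user structure
 * during early setup (via the proc_info tables), after which this
 * initial copy is never referenced again.
 */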

static int __init v6_userpage_init(void)
{
	if (cache_is_vipt_aliasing()) {
		cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing;
		cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
	}

	return 0;
}

core_initcall(v6_userpage_init);
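
/*
 * The structure above deliberately defaults to the non-aliasing
 * handlers; this core_initcall() then swaps in the colour-aware variants
 * when the cache is detected as VIPT aliasing.  core_initcall runs at
 * initcall level 1, early enough that the switch should be complete
 * before drivers or userspace exercise these hooks.
 */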