Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 sources for the Orange Pi 5 / 5B / 5 Plus boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include "mm.h"

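/*
 * Memory-type PTE bits used for shared writable aliases.  The default,
 * "bufferable", disables the cache but leaves the write buffer on;
 * check_writebuffer_bugs() below downgrades this to fully uncached if
 * the write buffer turns out to alias physical addresses.
 */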
static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE;

#if __LINUX_ARM_ARCH__ < 6
/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore those configurations which might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
 */
static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn, pte_t *ptep)
{
	pte_t entry = *ptep;
	int ret;

	/*
	 * If this page is present, it's actually being shared.
	 */
	ret = pte_present(entry);

	/*
	 * If this page isn't present, or is already set up to
	 * fault (i.e., is old), we can safely ignore any issues.
	 */
	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
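		/*
		 * Write back any dirty lines for this page from the L1
		 * and outer caches before the mapping is downgraded,
		 * then rewrite the memory-type bits and shoot down the
		 * stale TLB entry.
		 */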
		flush_cache_page(vma, address, pfn);
		outer_flush_range((pfn << PAGE_SHIFT),
				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
		pte_val(entry) &= ~L_PTE_MT_MASK;
		pte_val(entry) |= shared_pte_mask;
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_page(vma, address);
	}

	return ret;
}

#if USE_SPLIT_PTE_PTLOCKS
/*
 * If we are using split PTE locks, then we need to take the page
 * table lock here.  Otherwise we are using shared mm->page_table_lock
 * which is already locked, thus cannot take it.
 */
static inline void do_pte_lock(spinlock_t *ptl)
{
	/*
	 * Use nested version here to indicate that we are already
	 * holding one similar spinlock.
	 */
	spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
}

static inline void do_pte_unlock(spinlock_t *ptl)
{
	spin_unlock(ptl);
}
#else /* !USE_SPLIT_PTE_PTLOCKS */
static inline void do_pte_lock(spinlock_t *ptl) {}
static inline void do_pte_unlock(spinlock_t *ptl) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */

static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

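	/*
	 * Walk the page tables by hand; a missing or bad entry at any
	 * level means there is no PTE at this address to fix up.
	 */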
	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none_or_clear_bad(pgd))
		return 0;

	p4d = p4d_offset(pgd, address);
	if (p4d_none_or_clear_bad(p4d))
		return 0;

	pud = pud_offset(p4d, address);
	if (pud_none_or_clear_bad(pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 0;

	/*
	 * This is called while another page table is mapped, so we
	 * must use the nested version.  This also means we need to
	 * open-code the spin-locking.
	 */
	ptl = pte_lockptr(vma->vm_mm, pmd);
	pte = pte_offset_map(pmd, address);
	do_pte_lock(ptl);

	ret = do_adjust_pte(vma, address, pfn, pte);

	do_pte_unlock(ptl);
	pte_unmap(pte);

	return ret;
}

static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
	unsigned long addr, pte_t *ptep, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *mpnt;
	unsigned long offset;
	pgoff_t pgoff;
	int aliases = 0;

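	/* Offset of the faulting page within the mapped object. */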
	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

	/*
	 * If we have any shared mappings that are in the same mm
	 * space, then we need to handle them specially to maintain
	 * cache coherency.
	 */
	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If this VMA is not in our MM, we can ignore it.
		 * Note that we intentionally mask out the VMA
		 * that we are fixing up.
		 */
		if (mpnt->vm_mm != mm || mpnt == vma)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
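		/*
		 * Translate the shared page offset into the matching
		 * user address inside this other VMA.
		 */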
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
	}
	flush_dcache_mmap_unlock(mapping);
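	/*
	 * If any other mapping was downgraded, downgrade the faulting
	 * PTE too so that every alias uses the same memory type.
	 */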
	if (aliases)
		do_adjust_pte(vma, addr, pfn, ptep);
}

/*
 * Take care of architecture specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_clean is not set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
	pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct address_space *mapping;
	struct page *page;

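	/* Ignore PTEs that do not point at struct-page-backed RAM. */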
	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping_file(page);
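	/*
	 * If PG_dcache_clean was not already set, the kernel may hold
	 * dirty cache lines for this page; write them back before
	 * userspace can observe the new mapping.
	 */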
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);
	if (mapping) {
		if (cache_is_vivt())
			make_coherent(mapping, vma, addr, ptep, pfn);
		else if (vma->vm_flags & VM_EXEC)
			__flush_icache_all();
	}
}
#endif	/* __LINUX_ARM_ARCH__ < 6 */

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
	register unsigned long zero = 0, one = 1, val;

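	/*
	 * p1 and p2 are two virtual aliases of the same physical word.
	 * Store 1 through one alias and 0 through the other; if the
	 * final load through the first alias still sees 1, the write
	 * buffer is aliasing on physical address and the work-around
	 * is needed.
	 */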
	local_irq_disable();
	mb();
	*p1 = one;
	mb();
	*p2 = zero;
	mb();
	val = *p1;
	mb();
	local_irq_enable();
	return val != zero;
}

void __init check_writebuffer_bugs(void)
{
	struct page *page;
	const char *reason;
	unsigned long v = 1;

	pr_info("CPU: Testing write buffer coherency: ");

	page = alloc_page(GFP_KERNEL);
	if (page) {
		unsigned long *p1, *p2;
		pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
					L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);

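		/*
		 * Map the same page at two kernel virtual addresses with
		 * the bufferable memory type, giving check_writebuffer()
		 * two aliases of one physical page to probe.
		 */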
		p1 = vmap(&page, 1, VM_IOREMAP, prot);
		p2 = vmap(&page, 1, VM_IOREMAP, prot);

		if (p1 && p2) {
			v = check_writebuffer(p1, p2);
			reason = "enabling work-around";
		} else {
			reason = "unable to map memory";
		}

		vunmap(p1);
		vunmap(p2);
		put_page(page);
	} else {
		reason = "unable to grab page";
	}

	if (v) {
		pr_cont("failed, %s\n", reason);
		shared_pte_mask = L_PTE_MT_UNCACHED;
	} else {
		pr_cont("ok\n");
	}
}
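
A minimal userspace sketch (not part of the kernel source) of the situation make_coherent() exists for: one process mapping the same file page at two virtual addresses. On a VIVT-cache ARM CPU the two mappings are cache aliases, which the code above resolves by downgrading the PTEs to the shared_pte_mask memory type. The file path here is illustrative only.

/* demo-alias.c: map the same file page twice in one address space. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/alias-demo", O_RDWR | O_CREAT, 0600);
	if (fd < 0 || ftruncate(fd, 4096) < 0)
		return 1;

	/* Two MAP_SHARED mappings of one file page: virtual aliases. */
	char *a = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	char *b = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (a == MAP_FAILED || b == MAP_FAILED)
		return 1;

	/* A store through one alias must be visible through the other. */
	strcpy(a, "hello");
	printf("read back via second mapping: %s\n", b);

	munmap(a, 4096);
	munmap(b, 4096);
	close(fd);
	return 0;
}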