Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

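File: arch/powerpc/mm/book3s64/pgtable.c
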
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/memblock.h>
#include <misc/cxl-base.h>

#include <asm/debugfs.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/powernv.h>
#include <asm/firmware.h>
#include <asm/ultravisor.h>
#include <asm/kexec.h>

#include <mm/mmu_decl.h>
#include <trace/events/thp.h>

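/*
 * PMD fragment geometry: the number of PMD fragments carved out of one
 * page and the log2 of the fragment size. These back the PMD_FRAG_NR and
 * PMD_FRAG_SIZE macros used by the fragment allocator below.
 */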
unsigned long __pmd_frag_nr;
EXPORT_SYMBOL(__pmd_frag_nr);
unsigned long __pmd_frag_size_shift;
EXPORT_SYMBOL(__pmd_frag_size_shift);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called in
 * the page fault path when we don't hit any of the major fault cases,
 * i.e., a minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic
 * code will have handled those two for us; we additionally deal with
 * missing execute permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		/*
		 * We can use MMU_PAGE_2M here, because only the radix
		 * path looks at the psize.
		 */
		__ptep_set_access_flags(vma, pmdp_ptep(pmdp),
					pmd_pte(entry), address, MMU_PAGE_2M);
	}
	return changed;
}

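/*
 * Test and clear the referenced (young) bit on a huge pmd; a thin wrapper
 * around __pmdp_test_and_clear_young(), which does the atomic update.
 */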
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}
/*
 * Set a new huge pmd. We should not be called to update an existing pmd
 * entry; that should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * Make sure the hardware valid bit is not set. We don't do
	 * a TLB flush for this update.
	 */

	WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
	WARN_ON(!(pmd_large(pmd)));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

static void do_nothing(void *unused)
{

}
/*
 * Serialize against find_current_mm_pte, which does a lock-less lookup
 * in the page tables with local interrupts disabled. For huge pages it
 * casts pmd_t to pte_t. Since the format of pte_t differs from that of
 * pmd_t, we want to prevent a transition from a pmd pointing to a page
 * table to a pmd pointing to a huge page (and back) while interrupts are
 * disabled. We clear the pmd so we can possibly replace it with a page
 * table pointer in different code paths, so make sure we wait for any
 * parallel find_current_mm_pte to finish.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
	smp_mb();
	smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1);
}

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	unsigned long old_pmd;

	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return __pmd(old_pmd);
}

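/*
 * Clear a huge pmd and return the old value. The "full" flag indicates a
 * full-mm teardown, in which case the TLB flush can be skipped because
 * the whole address space is going away.
 */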
pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmdp, int full)
{
	pmd_t pmd;
	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
	VM_BUG_ON((pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
		   !pmd_devmap(*pmdp)) || !pmd_present(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
	/*
	 * If it is not a fullmm flush, then a parallel page fault can
	 * possibly convert this PMD entry into a regular level 0 PTE.
	 * Make sure we flush the TLB in that case.
	 */
	if (!full)
		flush_pmd_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
	return pmd;
}

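/*
 * Helpers for building huge-pmd values: OR the protection bits into a raw
 * pmd (pmd_set_protbits), build a pmd from a pfn (pfn_pmd) or a struct
 * page (mk_pmd), and change the protection on an existing pmd, keeping
 * only the bits in _HPAGE_CHG_MASK (pmd_modify).
 */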
static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
	return pmd_set_protbits(__pmd(pmdv), pgprot);
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}

pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec */
void mmu_cleanup_all(void)
{
	if (radix_enabled())
		radix__mmu_cleanup_all();
	else if (mmu_hash_ops.hpte_clear_all)
		mmu_hash_ops.hpte_clear_all();

	reset_sprs();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int __meminit create_section_mapping(unsigned long start, unsigned long end,
				     int nid, pgprot_t prot)
{
	if (radix_enabled())
		return radix__create_section_mapping(start, end, nid, prot);

	return hash__create_section_mapping(start, end, nid, prot);
}

int __meminit remove_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__remove_section_mapping(start, end);

	return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

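/*
 * Allocate the partition table at boot and point the hardware at it via
 * the partition table control register (PTCR). The table must be
 * naturally aligned to its size, hence the matching size/align arguments
 * to memblock_alloc().
 */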
void __init mmu_partition_table_init(void)
{
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
	unsigned long ptcr;

	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
	/* Initialize the Partition Table with no entries */
	partition_tb = memblock_alloc(patb_size, patb_size);
	if (!partition_tb)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, patb_size, patb_size);

	/*
	 * Update the partition table control register with the table base
	 * and the encoded table size (PATB_SIZE_SHIFT - 12, i.e. a 64K
	 * table).
	 */
	ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
	set_ptcr_when_no_uv(ptcr);
	powernv_set_nmmu_ptcr(ptcr);
}

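/*
 * Flush all translations cached for a partition (LPID). On radix this is
 * two targeted flushes (partition scope and guest scope); on hash it is a
 * tlbie for the LPID, bracketed by the usual ptesync / eieio; tlbsync;
 * ptesync sequence.
 */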
static void flush_partition(unsigned int lpid, bool radix)
{
	if (radix) {
		radix__flush_all_lpid(lpid);
		radix__flush_all_lpid_guest(lpid);
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		/* Do we need a fixup here? */
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
	}
}

void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
				   unsigned long dw1, bool flush)
{
	unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

	/*
	 * When an ultravisor is enabled, the partition table is stored in
	 * secure memory and can only be accessed via an ultravisor call.
	 * However, we maintain a copy of the partition table in normal
	 * memory to allow Nest MMU translations to occur (for normal VMs).
	 *
	 * Therefore, here we always update partition_tb, regardless of
	 * whether we are running under an ultravisor or not.
	 */
	partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	partition_tb[lpid].patb1 = cpu_to_be64(dw1);

	/*
	 * If an ultravisor is enabled, we do an ultravisor call to register
	 * the partition table entry (PATE), which also does a global flush
	 * of TLBs and partition table caches for the lpid. Otherwise, just
	 * do the flush. The type of flush (hash or radix) depends on what
	 * the previous use of the partition ID was, not the new use.
	 */
	if (firmware_has_feature(FW_FEATURE_ULTRAVISOR)) {
		uv_register_pate(lpid, dw0, dw1);
		pr_info("PATE registered by ultravisor: dw0 = 0x%lx, dw1 = 0x%lx\n",
			dw0, dw1);
	} else if (flush) {
		/*
		 * Boot does not need to flush, because the MMU is off and
		 * each CPU does a tlbiel_all() before it is switched on,
		 * which flushes everything.
		 */
		flush_partition(lpid, (old & PATB_HR));
	}
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);

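/*
 * Fast path of the PMD fragment allocator: hand out the next unused
 * fragment of the page cached in mm->context.pmd_frag, if any. Returns
 * NULL when the cache is empty (or when a page holds only one fragment,
 * in which case caching is pointless).
 */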
static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
{
	void *pmd_frag, *ret;

	if (PMD_FRAG_NR == 1)
		return NULL;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pmd_frag;
	if (ret) {
		pmd_frag = ret + PMD_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments, mark the
		 * cached page NULL.
		 */
		if (((unsigned long)pmd_frag & ~PAGE_MASK) == 0)
			pmd_frag = NULL;
		mm->context.pmd_frag = pmd_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pmd_t *)ret;
}

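/*
 * Slow path: allocate a fresh page for PMD fragments. The new page's
 * first fragment is returned to the caller; the remainder are parked in
 * mm->context.pmd_frag for get_pmd_from_cache(), unless another thread
 * beat us to installing its own page there.
 */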
static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
{
	void *ret = NULL;
	struct page *page;
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	page = alloc_page(gfp);
	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {
		__free_pages(page, 0);
		return NULL;
	}

	atomic_set(&page->pt_frag_refcount, 1);

	ret = page_address(page);
	/*
	 * If we support only one fragment per page, just return the
	 * allocated page.
	 */
	if (PMD_FRAG_NR == 1)
		return ret;

	spin_lock(&mm->page_table_lock);
	/*
	 * If we find pmd_frag already set, someone else raced with us;
	 * return the allocated page with a single fragment count.
	 */
	if (likely(!mm->context.pmd_frag)) {
		atomic_set(&page->pt_frag_refcount, PMD_FRAG_NR);
		mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pmd_t *)ret;
}

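/*
 * Allocate one PMD fragment: try the per-mm cache first, then fall back
 * to allocating a new page. vmaddr is accepted but unused here.
 */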
pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	pmd_t *pmd;

	pmd = get_pmd_from_cache(mm);
	if (pmd)
		return pmd;

	return __alloc_for_pmdcache(mm);
}

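/*
 * Drop one reference on the fragment's backing page; the page itself is
 * freed only when its last fragment goes away. Reserved pages (e.g. from
 * early boot allocations) take the free_reserved_page() path instead.
 */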
void pmd_fragment_free(unsigned long *pmd)
{
	struct page *page = virt_to_page(pmd);

	if (PageReserved(page))
		return free_reserved_page(page);

	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
		pgtable_pmd_page_dtor(page);
		__free_page(page);
	}
}

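/*
 * Free a page table of any level, dispatching on the index that
 * pgtable_free_tlb() encoded into the pointer's low bits.
 */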
static inline void pgtable_free(void *table, int index)
{
	switch (index) {
	case PTE_INDEX:
		pte_fragment_free(table, 0);
		break;
	case PMD_INDEX:
		pmd_fragment_free(table);
		break;
	case PUD_INDEX:
		__pud_free(table);
		break;
#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
		/* 16M hugepd directory at pud level */
	case HTLB_16M_INDEX:
		BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
		kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
		break;
		/* 16G hugepd directory at the pgd level */
	case HTLB_16G_INDEX:
		BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
		kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
		break;
#endif
		/* We don't free pgd table via RCU callback */
	default:
		BUG();
	}
}

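/*
 * Defer freeing of a page table until after the TLB has been flushed.
 * Table pointers are aligned well past MAX_PGTABLE_INDEX_SIZE, so the
 * table level (index) is smuggled in the low bits of the pointer and
 * recovered in __tlb_remove_table().
 */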
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(index > MAX_PGTABLE_INDEX_SIZE);
	pgf |= index;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	return pgtable_free(table, index);
}

#ifdef CONFIG_PROC_FS
atomic_long_t direct_pages_count[MMU_PAGE_COUNT];

void arch_report_meminfo(struct seq_file *m)
{
	/*
	 * Hash maps the memory with one size, mmu_linear_psize,
	 * so don't bother to print these on hash.
	 */
	if (!radix_enabled())
		return;
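	/*
	 * direct_pages_count[] holds page counts; the shifts convert each
	 * count to kB: 4K pages x 4 (<< 2), 64K x 64 (<< 6), 2M x 2048
	 * (<< 11) and 1G x 1048576 (<< 20).
	 */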
	seq_printf(m, "DirectMap4k:    %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_4K]) << 2);
	seq_printf(m, "DirectMap64k:   %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_64K]) << 6);
	seq_printf(m, "DirectMap2M:    %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_2M]) << 11);
	seq_printf(m, "DirectMap1G:    %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
}
#endif /* CONFIG_PROC_FS */

pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep)
{
	unsigned long pte_val;

	/*
	 * Clear _PAGE_PRESENT so that no hardware parallel update is
	 * possible. Also keep pte_present() true (via _PAGE_INVALID) so
	 * that we don't take a wrong fault.
	 */
	pte_val = pte_update(vma->vm_mm, addr, ptep, _PAGE_PRESENT, _PAGE_INVALID, 0);

	return __pte(pte_val);
}

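/*
 * Commit the new PTE after a ptep_modify_prot_start(). Radix takes its
 * own commit path (which handles the extra flushing the Nest MMU needs
 * when access rights are relaxed); everyone else simply installs the
 * new PTE.
 */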
void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep, pte_t old_pte, pte_t pte)
{
	if (radix_enabled())
		return radix__ptep_modify_prot_commit(vma, addr,
						      ptep, old_pte, pte);
	set_pte_at(vma->vm_mm, addr, ptep, pte);
}

/*
 * For hash translation mode, we use the deposited table to store hash slot
 * information, and it is stored at PTRS_PER_PMD offset from the related pmd
 * location. Hence a pmd move requires both deposit and withdraw.
 *
 * For radix translation with split pmd ptl, we store the deposited table in
 * the pmd page. Hence if we have a different pmd page we need to withdraw
 * during a pmd move.
 *
 * With hash we always use the deposited table, irrespective of whether the
 * mapping is anonymous; with radix we use it only for anonymous mappings.
 */
int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
			   struct spinlock *old_pmd_ptl,
			   struct vm_area_struct *vma)
{
	if (radix_enabled())
		return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);

	return true;
}

/*
 * Does the CPU support tlbie?
 */
bool tlbie_capable __read_mostly = true;
EXPORT_SYMBOL(tlbie_capable);

/*
 * Should tlbie be used for management of CPU TLBs, for kernel and process
 * address spaces? tlbie may still be used for nMMU accelerators, and for KVM
 * guest address spaces.
 */
bool tlbie_enabled __read_mostly = true;

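/*
 * "disable_tlbie" on the kernel command line turns off broadcast tlbie,
 * making the TLB flush code fall back to tlbiel with IPIs. It is refused
 * on the hash MMU (see below).
 */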
static int __init setup_disable_tlbie(char *str)
{
	if (!radix_enabled()) {
		pr_err("disable_tlbie: Unable to disable TLBIE with Hash MMU.\n");
		return 1;
	}

	tlbie_capable = false;
	tlbie_enabled = false;

	return 1;
}
__setup("disable_tlbie", setup_disable_tlbie);

static int __init pgtable_debugfs_setup(void)
{
	if (!tlbie_capable)
		return 0;

	/*
	 * There is no locking vs tlb flushing when changing this value.
	 * The tlb flushers will see one value or another, and use either
	 * tlbie or tlbiel with IPIs. In both cases the TLBs will be
	 * invalidated as expected.
	 */
	debugfs_create_bool("tlbie_enabled", 0600,
			    powerpc_debugfs_root,
			    &tlbie_enabled);

	return 0;
}
arch_initcall(pgtable_debugfs_setup);