Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);

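/* Flush every virtual address batched on this CPU.  If the mm still
 * has a valid hardware context, a single pending page is flushed
 * directly, otherwise the whole batch is handed to the SMP (or UP)
 * pending-flush helper; TSB entries are torn down first either way.
 */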
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	if (!tb->tlb_nr)
		goto out;

	flush_tsb_user(tb);

	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
	}

	tb->tlb_nr = 0;

out:
	put_cpu_var(tlb_batch);
}

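/* Lazy MMU mode brackets a region in which PTE tear-downs may be
 * batched: entering marks the per-CPU batch active, leaving flushes
 * anything still pending and deactivates it.
 */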
void arch_enter_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	tb->active = 1;
}

void arch_leave_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	if (tb->tlb_nr)
		flush_tlb_pending();
	tb->active = 0;
}

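/* Queue one address in the per-CPU batch, encoding "executable" in
 * bit 0 of the page-aligned vaddr.  The batch is flushed early when
 * the mm or the hugepage shift changes, flushed immediately when
 * batching is inactive, and drained once TLB_BATCH_NR entries pile up.
 */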
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec, unsigned int hugepage_shift)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (!tb->active) {
		flush_tsb_user_page(mm, vaddr, hugepage_shift);
		global_flush_tlb_page(mm, vaddr);
		goto out;
	}

	if (nr == 0) {
		tb->mm = mm;
		tb->hugepage_shift = hugepage_shift;
	}

	if (tb->hugepage_shift != hugepage_shift) {
		flush_tlb_pending();
		tb->hugepage_shift = hugepage_shift;
		nr = 0;
	}

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}

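/* Called when a PTE is being torn down.  For a dirty file-backed page,
 * bit 13 selects the D-cache colour on CPUs with a direct-mapped,
 * virtually indexed D-cache, so a kernel/user colour mismatch forces a
 * D-cache flush before the mapping disappears.  The TLB entry itself
 * is only batched when this is not a full-mm teardown, since fullmm
 * flushes the entire context in one go.
 */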
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm,
		   unsigned int hugepage_shift)
{
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping_file(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig), hugepage_shift);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
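/* A huge PMD was replaced by a page table: walk the HPAGE_SIZE region
 * one base page at a time and queue a flush for each still-valid PTE.
 */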
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_map(&pmd, vaddr);
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID) {
			bool exec = pte_exec(*pte);

			tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
		}
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte);
}

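/* Bookkeeping shared by set_pmd_at() and pmdp_establish(): adjust the
 * per-mm THP/huge-zero-page counters when _PAGE_PMD_HUGE toggles, and
 * queue TLB flushes for whatever mapping the old PMD provided.
 */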
static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
			   pmd_t orig, pmd_t pmd)
{
	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
		/*
		 * Note that this routine only sets pmds for THP pages.
		 * Hugetlb pages are handled elsewhere.  We need to check
		 * for huge zero page.  Huge zero pages are like hugetlb
		 * pages in that there is no RSS, but there is the need
		 * for TSB entries.  So, huge zero page counts go into
		 * hugetlb_pte_count.
		 */
		if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
			if (is_huge_zero_page(pmd_page(pmd)))
				mm->context.hugetlb_pte_count++;
			else
				mm->context.thp_pte_count++;
		} else {
			if (is_huge_zero_page(pmd_page(orig)))
				mm->context.hugetlb_pte_count--;
			else
				mm->context.thp_pte_count--;
		}

		/* Do not try to allocate the TSB hash table if we
		 * don't have one already.  We have various locks held
		 * and thus we'll end up doing a GFP_KERNEL allocation
		 * in an atomic context.
		 *
		 * Instead, we let the first TLB miss on a hugepage
		 * take care of this.
		 */
	}

	if (!pmd_none(orig)) {
		addr &= HPAGE_MASK;
		if (pmd_trans_huge(orig)) {
			pte_t orig_pte = __pte(pmd_val(orig));
			bool exec = pte_exec(orig_pte);

			tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
					  REAL_HPAGE_SHIFT);
		} else {
			tlb_batch_pmd_scan(mm, addr, orig);
		}
	}
}

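/* Plain (non-atomic) PMD store plus the accounting above; callers
 * serialize against each other via the page table lock.
 */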
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;
	__set_pmd_acct(mm, addr, orig, pmd);
}

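/* Atomically replace *pmdp with pmd and return the old value.  The
 * cmpxchg64() loop retries until the swap succeeds against an
 * unchanged snapshot, so a concurrent update is never lost.
 */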
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	pmd_t old;

	do {
		old = *pmdp;
	} while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
	__set_pmd_acct(vma->vm_mm, address, old, pmd);

	return old;
}

/*
 * This routine is only called when splitting a THP
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t old, entry;

	entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
	old = pmdp_establish(vma, address, pmdp, entry);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	/*
	 * set_pmd_at() will not be called in a way to decrement
	 * thp_pte_count when splitting a THP, so do it now.
	 * Sanity check pmd before doing the actual decrement.
	 */
	if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
	    !is_huge_zero_page(pmd_page(entry)))
		(vma->vm_mm)->context.thp_pte_count--;

	return old;
}

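/* Stash a preallocated page table underneath a huge PMD.  Deposited
 * tables are chained through pmd_huge_pte(), reusing the start of the
 * page table page itself as the list_head.
 */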
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

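/* Take a deposited page table back off the list and zero its first
 * two entries, which the deposit path used as list_head storage.
 */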
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */