// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Refer to Documentation/vm/arch_pgtable_helpers.rst for the semantics
 * expectations being validated here. All future changes here or in the
 * documentation need to be kept in sync.
 */

#define VMFLAGS	(VM_READ|VM_WRITE|VM_EXEC)

/*
 * On the s390 platform, the lower 4 bits are used to identify a given
 * page table entry type. But these bits might affect the ability to
 * clear entries with pxx_clear() because of how dynamic page table
 * folding works on s390. So while loading up the entries, do not change
 * the lower 4 bits. This does not affect any other platform. Also avoid
 * bit 62 on ppc64, which is used to mark a pte entry.
 */
#define S390_SKIP_MASK		GENMASK(3, 0)
#if __BITS_PER_LONG == 64
#define PPC64_SKIP_MASK		GENMASK(62, 62)
#else
#define PPC64_SKIP_MASK		0x0
#endif
#define ARCH_SKIP_MASK (S390_SKIP_MASK | PPC64_SKIP_MASK)
#define RANDOM_ORVALUE (GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK)
#define RANDOM_NZVALUE	GENMASK(7, 0)
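
/*
 * A minimal compile-time sketch of the invariant above, assuming
 * static_assert() from <linux/build_bug.h> is available here: the
 * random OR value must leave every arch-reserved bit clear, or
 * pxx_clear() could misbehave on s390 and ppc64.
 */
static_assert(!(RANDOM_ORVALUE & ARCH_SKIP_MASK),
	      "RANDOM_ORVALUE must leave arch-reserved bits clear");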

static void __init pte_basic_tests(unsigned long pfn, int idx)
{
	pgprot_t prot = protection_map[idx];
	pte_t pte = pfn_pte(pfn, prot);
	unsigned long val = idx, *ptr = &val;

	pr_debug("Validating PTE basic (%pGv)\n", ptr);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pte() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pte_dirty(pte_wrprotect(pte)));

	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
	WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
	WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));
}
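
/*
 * A sketch of how a caller is expected to drive the basic tests across
 * every protection_map[] entry (the debug_vm_pgtable() entry point later
 * in this file does essentially this; pfn is assumed to be a valid,
 * suitably aligned test pfn):
 *
 *	for (idx = 0; idx < ARRAY_SIZE(protection_map); idx++)
 *		pte_basic_tests(pfn, idx);
 */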

static void __init pte_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pte_t *ptep,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	/*
	 * Architectures optimize set_pte_at() by avoiding a TLB flush.
	 * This requires that set_pte_at() not be used to update an
	 * existing pte entry. Clear the pte before calling set_pte_at().
	 */

	pr_debug("Validating PTE advanced\n");
	pte = pfn_pte(pfn, prot);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_set_wrprotect(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(pte_write(pte));
	ptep_get_and_clear(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(pfn, prot);
	pte = pte_wrprotect(pte);
	pte = pte_mkclean(pte);
	set_pte_at(mm, vaddr, ptep, pte);
	pte = pte_mkwrite(pte);
	pte = pte_mkdirty(pte);
	ptep_set_access_flags(vma, vaddr, ptep, pte, 1);
	pte = ptep_get(ptep);
	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
	ptep_get_and_clear_full(mm, vaddr, ptep, 1);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(pfn, prot);
	pte = pte_mkyoung(pte);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_test_and_clear_young(vma, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(pte_young(pte));

	ptep_get_and_clear_full(mm, vaddr, ptep, 1);
}
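
/*
 * A hedged sketch of the clear-before-set rule described above: a
 * hypothetical helper (not part of this test suite) that never lets
 * set_pte_at() overwrite a live entry.
 */
static inline void __init set_pte_fresh(struct mm_struct *mm, unsigned long vaddr,
					pte_t *ptep, pte_t pte)
{
	/* Drop any existing entry first ... */
	ptep_get_and_clear(mm, vaddr, ptep);
	/* ... so the flush-avoiding store below never hits a live pte. */
	set_pte_at(mm, vaddr, ptep, pte);
}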

static void __init pte_savedwrite_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE saved write\n");
	WARN_ON(!pte_savedwrite(pte_mk_savedwrite(pte_clear_savedwrite(pte))));
	WARN_ON(pte_savedwrite(pte_clear_savedwrite(pte_mk_savedwrite(pte))));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(unsigned long pfn, int idx)
{
	pgprot_t prot = protection_map[idx];
	unsigned long val = idx, *ptr = &val;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD basic (%pGv)\n", ptr);
	pmd = pfn_pmd(pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pmd() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));

	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd))));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
	WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));
	/*
	 * A huge page does not point to the next level of page table.
	 * Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

static void __init pmd_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pmd_t *pmdp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot, pgtable_t pgtable)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD advanced\n");
	/* Align the address wrt HPAGE_PMD_SIZE */
	vaddr &= HPAGE_PMD_MASK;

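	/*
	 * Deposit a page table that a subsequent THP split could reuse;
	 * it is withdrawn again at the end of this function.
	 */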
	pgtable_trans_huge_deposit(mm, pmdp, pgtable);

	pmd = pfn_pmd(pfn, prot);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_set_wrprotect(mm, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_write(pmd));
	pmdp_huge_get_and_clear(mm, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pfn_pmd(pfn, prot);
	pmd = pmd_wrprotect(pmd);
	pmd = pmd_mkclean(pmd);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmd = pmd_mkwrite(pmd);
	pmd = pmd_mkdirty(pmd);
	pmdp_set_access_flags(vma, vaddr, pmdp, pmd, 1);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
	pmdp_huge_get_and_clear_full(vma, vaddr, pmdp, 1);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
	pmd = pmd_mkyoung(pmd);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_test_and_clear_young(vma, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_young(pmd));

	/* Clear the pmd entry and withdraw the deposited page table */
	pmdp_huge_get_and_clear(mm, vaddr, pmdp);
	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
}

static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD leaf\n");
	pmd = pfn_pmd(pfn, prot);

	/*
	 * PMD based THP is a leaf entry.
	 */
	pmd = pmd_mkhuge(pmd);
	WARN_ON(!pmd_leaf(pmd));
}

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!arch_ioremap_pmd_supported())
		return;

	pr_debug("Validating PMD huge\n");
	/*
	 * X86 defined pmd_set_huge() verifies that the given
	 * PMD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*pmdp, __pmd(0));
	WARN_ON(!pmd_set_huge(pmdp, __pfn_to_phys(pfn), prot));
	WARN_ON(!pmd_clear_huge(pmdp));
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD saved write\n");
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_savedwrite(pmd_mk_savedwrite(pmd_clear_savedwrite(pmd))));
	WARN_ON(pmd_savedwrite(pmd_clear_savedwrite(pmd_mk_savedwrite(pmd))));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx)
{
	pgprot_t prot = protection_map[idx];
	unsigned long val = idx, *ptr = &val;
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD basic (%pGv)\n", ptr);
	pud = pfn_pud(pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pud() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pud_dirty(pud_wrprotect(pud)));

	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
	WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
	WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
	WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));

	if (mm_pmd_folded(mm))
		return;

	/*
	 * A huge page does not point to the next level of page table.
	 * Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
}

static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD advanced\n");
	/* Align the address wrt HPAGE_PUD_SIZE */
	vaddr &= HPAGE_PUD_MASK;

	pud = pfn_pud(pfn, prot);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_set_wrprotect(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_write(pud));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
	pud = pfn_pud(pfn, prot);
	pud = pud_wrprotect(pud);
	pud = pud_mkclean(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pud = pud_mkwrite(pud);
	pud = pud_mkdirty(pud);
	pudp_set_access_flags(vma, vaddr, pudp, pud, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear_full(mm, vaddr, pudp, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */

	pud = pfn_pud(pfn, prot);
	pud = pud_mkyoung(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_test_and_clear_young(vma, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_young(pud));

	pudp_huge_get_and_clear(mm, vaddr, pudp);
}

static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD leaf\n");
	pud = pfn_pud(pfn, prot);
	/*
	 * PUD based THP is a leaf entry.
	 */
	pud = pud_mkhuge(pud);
	WARN_ON(!pud_leaf(pud));
}

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!arch_ioremap_pud_supported())
		return;

	pr_debug("Validating PUD huge\n");
	/*
	 * X86 defined pud_set_huge() verifies that the given
	 * PUD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*pudp, __pud(0));
	WARN_ON(!pud_set_huge(pudp, __pfn_to_phys(pfn), prot));
	WARN_ON(!pud_clear_huge(pudp));
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx) { }
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(unsigned long pfn, int idx) { }
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx) { }
static void __init pmd_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pmd_t *pmdp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot, pgtable_t pgtable)
{
}
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
{
}
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
}
static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __init p4d_basic_tests(unsigned long pfn, pgprot_t prot)
{
	p4d_t p4d;

	pr_debug("Validating P4D basic\n");
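	/* Fill with non-zero garbage: p4d_same() must be reflexive for any value. */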
	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pgd_t pgd;

	pr_debug("Validating PGD basic\n");
	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}

#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp)
{
	pud_t pud = READ_ONCE(*pudp);

	if (mm_pmd_folded(mm))
		return;

	pr_debug("Validating PUD clear\n");
	pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
	WRITE_ONCE(*pudp, pud);
	pud_clear(pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
	pud_t pud;

	if (mm_pmd_folded(mm))
		return;

	pr_debug("Validating PUD populate\n");
	/*
	 * This entry points to the next-level page table page.
	 * Hence this must not qualify as pud_bad().
	 */
	pud_populate(mm, pudp, pmdp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_bad(pud));
}
#else /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp) { }
static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
}
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp)
{
	p4d_t p4d = READ_ONCE(*p4dp);

	if (mm_pud_folded(mm))
		return;

	pr_debug("Validating P4D clear\n");
	p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
	WRITE_ONCE(*p4dp, p4d);
	p4d_clear(p4dp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
	p4d_t p4d;

	if (mm_pud_folded(mm))
		return;

	pr_debug("Validating P4D populate\n");
	/*
	 * This entry points to the next-level page table page.
	 * Hence this must not qualify as p4d_bad().
	 */
	pud_clear(pudp);
	p4d_clear(p4dp);
	p4d_populate(mm, p4dp, pudp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp)
{
	pgd_t pgd = READ_ONCE(*pgdp);

	if (mm_p4d_folded(mm))
		return;

	pr_debug("Validating PGD clear\n");
	pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pgdp, pgd);
	pgd_clear(pgdp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
	pgd_t pgd;

	if (mm_p4d_folded(mm))
		return;

	pr_debug("Validating PGD populate\n");
	/*
	 * This entry points to the next-level page table page.
	 * Hence this must not qualify as pgd_bad().
	 */
	p4d_clear(p4dp);
	pgd_clear(pgdp);
	pgd_populate(mm, pgdp, p4dp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp) { }
static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp) { }
static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
}
static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
}
#endif /* __PAGETABLE_P4D_FOLDED */

static void __init pte_clear_tests(struct mm_struct *mm, pte_t *ptep,
				   unsigned long pfn, unsigned long vaddr,
				   pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	pr_debug("Validating PTE clear\n");
#ifndef CONFIG_RISCV
	pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
#endif
	set_pte_at(mm, vaddr, ptep, pte);
	barrier();
	pte_clear(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct mm_struct *mm, pmd_t *pmdp)
{
	pmd_t pmd = READ_ONCE(*pmdp);

	pr_debug("Validating PMD clear\n");
	pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pmdp, pmd);
	pmd_clear(pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct mm_struct *mm, pmd_t *pmdp,
				      pgtable_t pgtable)
{
	pmd_t pmd;

	pr_debug("Validating PMD populate\n");
	/*
	 * This entry points to the next-level page table page.
	 * Hence this must not qualify as pmd_bad().
	 */
	pmd_populate(mm, pmdp, pgtable);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_bad(pmd));
}

static void __init pte_special_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
		return;

	pr_debug("Validating PTE special\n");
	WARN_ON(!pte_special(pte_mkspecial(pte)));
}

static void __init pte_protnone_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

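	/*
	 * The caller is expected to pass a PROT_NONE style protection here
	 * (e.g. protection_map[0]), i.e. an entry with no access rights
	 * that nonetheless counts as present.
	 */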
	pr_debug("Validating PTE protnone\n");
	WARN_ON(!pte_protnone(pte));
	WARN_ON(!pte_present(pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD protnone\n");
	pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
	WARN_ON(!pmd_protnone(pmd));
	WARN_ON(!pmd_present(pmd));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	pr_debug("Validating PTE devmap\n");
	WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD devmap\n");
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD devmap\n");
	pud = pfn_pud(pfn, prot);
	WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#else /* !CONFIG_ARCH_HAS_PTE_DEVMAP */
static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */

static void __init pte_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE soft dirty\n");
	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}

static void __init pte_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE swap soft dirty\n");
	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD soft dirty\n");
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
	    !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap soft dirty\n");
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) static void __init pte_swap_tests(unsigned long pfn, pgprot_t prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) swp_entry_t swp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) pte_t pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) pr_debug("Validating PTE swap\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) pte = pfn_pte(pfn, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) swp = __pte_to_swp_entry(pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) pte = __swp_entry_to_pte(swp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) WARN_ON(pfn != pte_pfn(pte));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
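
/*
 * For reference, the round trip above only exercises the arch specific
 * bit conversion. A complete swap entry is normally built from a
 * (type, offset) pair with the generic <linux/swapops.h> helpers,
 * along the lines of (sketch only, arbitrary values):
 *
 * swp_entry_t entry = swp_entry(1, 100);
 * pte_t swp_pte = swp_entry_to_pte(entry);
 *
 * WARN_ON(swp_type(pte_to_swp_entry(swp_pte)) != 1);
 * WARN_ON(swp_offset(pte_to_swp_entry(swp_pte)) != 100);
 */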
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) swp_entry_t swp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) pmd_t pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) if (!has_transparent_hugepage())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) pr_debug("Validating PMD swap\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) pmd = pfn_pmd(pfn, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) swp = __pmd_to_swp_entry(pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) pmd = __swp_entry_to_pmd(swp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) WARN_ON(pfn != pmd_pfn(pmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) #else /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) #endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) static void __init swap_migration_tests(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) swp_entry_t swp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) if (!IS_ENABLED(CONFIG_MIGRATION))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) pr_debug("Validating swap migration\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) * swap_migration_tests() requires a dedicated page as it needs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) * be locked before creating a migration entry from it. Locking the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) * page that actually maps kernel text ('start_kernel') can be really
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * problematic. Let's allocate a dedicated page explicitly for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) * purpose; it will be freed afterwards.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) page = alloc_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (!page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) pr_err("page allocation failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * make_migration_entry() expects the given page to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) * locked, otherwise it trips a BUG_ON().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) __SetPageLocked(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) swp = make_migration_entry(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) WARN_ON(!is_migration_entry(swp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) WARN_ON(!is_write_migration_entry(swp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) make_migration_entry_read(&swp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) WARN_ON(!is_migration_entry(swp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) WARN_ON(is_write_migration_entry(swp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) swp = make_migration_entry(page, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) WARN_ON(!is_migration_entry(swp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) WARN_ON(is_write_migration_entry(swp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) __ClearPageLocked(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) __free_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) }
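
/*
 * For reference, with the generic <linux/swapops.h> implementation a
 * migration entry is itself a swap entry whose type encodes the access
 * mode and whose offset encodes the pfn, roughly:
 *
 * swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
 *           page_to_pfn(page))
 *
 * which is why make_migration_entry_read() can downgrade the entry in
 * place without touching the pfn.
 */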
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) #ifdef CONFIG_HUGETLB_PAGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) static void __init hugetlb_basic_tests(unsigned long pfn, pgprot_t prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) pte_t pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) pr_debug("Validating HugeTLB basic\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * Accessing the page associated with the pfn is safe here,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) * as it was previously derived from a real kernel symbol.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) page = pfn_to_page(pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) pte = mk_huge_pte(page, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) pte = pfn_pte(pfn, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) WARN_ON(!pte_huge(pte_mkhuge(pte)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) #else /* !CONFIG_HUGETLB_PAGE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) static void __init hugetlb_basic_tests(unsigned long pfn, pgprot_t prot) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) #endif /* CONFIG_HUGETLB_PAGE */
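
/*
 * Note: with the asm-generic <asm-generic/hugetlb.h> fallback,
 * mk_huge_pte() is simply mk_pte() and huge_pte_dirty()/huge_pte_write()
 * wrap their pte_*() counterparts, so on such architectures the HugeTLB
 * checks above exercise the same underlying bits as the ordinary PTE
 * tests.
 */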
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) static void __init pmd_thp_tests(unsigned long pfn, pgprot_t prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) pmd_t pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (!has_transparent_hugepage())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) pr_debug("Validating PMD based THP\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * pmd_trans_huge() and pmd_present() must both return true after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) * MMU invalidation with pmd_mkinvalid(). This behavior is an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) * optimization for transparent huge pages. pmd_trans_huge() must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * be true if pmd_page() returns a valid THP, to avoid taking the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) * pmd_lock when others walk over non-transhuge pmds (i.e. there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * are no THPs allocated). In particular, when splitting a THP and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * removing the present bit from the pmd, pmd_trans_huge() still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * needs to return true. pmd_present() should be true whenever
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) * pmd_trans_huge() returns true.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) pmd = pfn_pmd(pfn, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) #ifndef __HAVE_ARCH_PMDP_INVALIDATE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) #endif /* __HAVE_ARCH_PMDP_INVALIDATE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) pud_t pud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) if (!has_transparent_hugepage())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) pr_debug("Validating PUD based THP\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) pud = pfn_pud(pfn, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) * pud_mkinvalid() has been dropped for now. Re-enable these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) * tests once it is brought back with a modified pud_present().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) #else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) static void __init pmd_thp_tests(unsigned long pfn, pgprot_t prot) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) static unsigned long __init get_random_vaddr(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) unsigned long random_vaddr, random_pages, total_user_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) random_pages = get_random_long() % total_user_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) return random_vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
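
/*
 * Worked example with hypothetical values: for FIRST_USER_ADDRESS == 0,
 * TASK_SIZE == (1UL << 47) and 4K pages, total_user_pages is 1UL << 35,
 * so get_random_vaddr() returns a page aligned address somewhere within
 * the 128TB user range, which debug_vm_pgtable() below then uses to
 * walk and populate the page table.
 */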
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) static int __init debug_vm_pgtable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) struct vm_area_struct *vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) struct mm_struct *mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) pgd_t *pgdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) p4d_t *p4dp, *saved_p4dp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) pud_t *pudp, *saved_pudp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) pmd_t *pmdp, *saved_pmdp, pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) pte_t *ptep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) pgtable_t saved_ptep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) pgprot_t prot, protnone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) phys_addr_t paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) unsigned long vaddr, pte_aligned, pmd_aligned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) unsigned long pud_aligned, p4d_aligned, pgd_aligned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) spinlock_t *ptl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) pr_info("Validating architecture page table helpers\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) prot = vm_get_page_prot(VMFLAGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) vaddr = get_random_vaddr();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) mm = mm_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) if (!mm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) pr_err("mm_struct allocation failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) * __P000 (or even __S000) will help create page table entries with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * PROT_NONE permission as required for pxx_protnone_tests().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) protnone = __P000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) vma = vm_area_alloc(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) if (!vma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) pr_err("vma allocation failed\n");
mmdrop(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) * The PFN for mapping at the PTE level is determined from a standard
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * kernel text symbol, but the pfns for higher page table levels are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * derived by masking off the lower bits of this real pfn. These derived
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) * pfns might not exist on the platform, but that does not really matter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) * as the pfn_pxx() helpers will still create appropriate entries for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) * the test. This avoids having to allocate large memory blocks for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) * mappings at the higher page table levels.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) paddr = __pa_symbol(&start_kernel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) pte_aligned = (paddr & PAGE_MASK) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) pmd_aligned = (paddr & PMD_MASK) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) pud_aligned = (paddr & PUD_MASK) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) p4d_aligned = (paddr & P4D_MASK) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) pgd_aligned = (paddr & PGDIR_MASK) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) WARN_ON(!pfn_valid(pte_aligned));
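
/*
 * Worked example with a hypothetical paddr and x86_64 style geometry
 * (4K pages, 2M PMDs, 1G PUDs): paddr == 0x40201000 gives
 * pte_aligned == 0x40201, pmd_aligned == 0x40200 and
 * pud_aligned == 0x40000, i.e. each level simply clears the low
 * (PxD_SHIFT - PAGE_SHIFT) bits of the real pfn.
 */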
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) pgdp = pgd_offset(mm, vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) p4dp = p4d_alloc(mm, pgdp, vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) pudp = pud_alloc(mm, p4dp, vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) pmdp = pmd_alloc(mm, pudp, vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) * Allocate the pgtable_t that will back the PTE level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (pte_alloc(mm, pmdp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) pr_err("pgtable allocation failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) * Save all the page table page addresses as the page table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * entries will be used for testing with random or garbage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * values. These saved addresses will be used for freeing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * page table pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) pmd = READ_ONCE(*pmdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) saved_p4dp = p4d_offset(pgdp, 0UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) saved_pudp = pud_offset(p4dp, 0UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) saved_pmdp = pmd_offset(pudp, 0UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) saved_ptep = pmd_pgtable(pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) * Iterate over protection_map[] to make sure that all the basic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) * page table transformation validations hold true irrespective
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) * of the starting protection value for a given page table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) * entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) for (idx = 0; idx < ARRAY_SIZE(protection_map); idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) pte_basic_tests(pte_aligned, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) pmd_basic_tests(pmd_aligned, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) pud_basic_tests(mm, pud_aligned, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
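
/*
 * Note: with the generic protection_map[] this loop covers all 16 base
 * protections, __P000..__P111 followed by __S000..__S111, i.e. every
 * private/shared combination of the VM_READ, VM_WRITE and VM_EXEC bits.
 */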
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) * Both the P4D and PGD level tests are very basic and do not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) * involve creating page table entries from the protection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) * value and the given pfn. Hence keep them out of the above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) * iteration for now, to save some test execution time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) p4d_basic_tests(p4d_aligned, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) pgd_basic_tests(pgd_aligned, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) pmd_leaf_tests(pmd_aligned, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) pud_leaf_tests(pud_aligned, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) pte_savedwrite_tests(pte_aligned, protnone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) pmd_savedwrite_tests(pmd_aligned, protnone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) pte_special_tests(pte_aligned, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) pte_protnone_tests(pte_aligned, protnone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) pmd_protnone_tests(pmd_aligned, protnone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) pte_devmap_tests(pte_aligned, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) pmd_devmap_tests(pmd_aligned, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) pud_devmap_tests(pud_aligned, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) pte_soft_dirty_tests(pte_aligned, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) pmd_soft_dirty_tests(pmd_aligned, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) pte_swap_soft_dirty_tests(pte_aligned, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) pmd_swap_soft_dirty_tests(pmd_aligned, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) pte_swap_tests(pte_aligned, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) pmd_swap_tests(pmd_aligned, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) swap_migration_tests();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) pmd_thp_tests(pmd_aligned, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) pud_thp_tests(pud_aligned, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) hugetlb_basic_tests(pte_aligned, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) * Page table modifying tests. These must be run while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) * holding the proper page table lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) ptep = pte_offset_map_lock(mm, pmdp, vaddr, &ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) pte_clear_tests(mm, ptep, pte_aligned, vaddr, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) pte_advanced_tests(mm, vma, ptep, pte_aligned, vaddr, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) pte_unmap_unlock(ptep, ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) ptl = pmd_lock(mm, pmdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) pmd_clear_tests(mm, pmdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) pmd_advanced_tests(mm, vma, pmdp, pmd_aligned, vaddr, prot, saved_ptep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) pmd_huge_tests(pmdp, pmd_aligned, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) pmd_populate_tests(mm, pmdp, saved_ptep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) spin_unlock(ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) ptl = pud_lock(mm, pudp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) pud_clear_tests(mm, pudp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) pud_advanced_tests(mm, vma, pudp, pud_aligned, vaddr, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) pud_huge_tests(pudp, pud_aligned, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) pud_populate_tests(mm, pudp, saved_pmdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) spin_unlock(ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) spin_lock(&mm->page_table_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) p4d_clear_tests(mm, p4dp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) pgd_clear_tests(mm, pgdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) p4d_populate_tests(mm, p4dp, saved_pudp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) pgd_populate_tests(mm, pgdp, saved_p4dp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) spin_unlock(&mm->page_table_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) p4d_free(mm, saved_p4dp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) pud_free(mm, saved_pudp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) pmd_free(mm, saved_pmdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) pte_free(mm, saved_ptep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) vm_area_free(vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) mm_dec_nr_puds(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) mm_dec_nr_pmds(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) mm_dec_nr_ptes(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) mmdrop(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) late_initcall(debug_vm_pgtable);
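
/*
 * Being a late initcall, the test runs once towards the end of boot,
 * after the MM subsystem has been fully initialized. Build with
 * CONFIG_DEBUG_VM_PGTABLE=y and check dmesg for WARN_ON() splats to see
 * the results.
 */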