// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 SiFive
 */

#include <linux/pagewalk.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/bitops.h>
#include <asm/set_memory.h>

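/*
 * Protection bits to OR in (set_mask) and to mask out (clear_mask) in
 * every page table entry visited by the walk below.
 */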
struct pageattr_masks {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

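/* Apply the masks carried in walk->private to a raw page table value. */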
static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
{
	struct pageattr_masks *masks = walk->private;
	unsigned long new_val = val;

	new_val &= ~(pgprot_val(masks->clear_mask));
	new_val |= (pgprot_val(masks->set_mask));

	return new_val;
}

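/*
 * Per-level walk callbacks: an entry is rewritten only when it is a leaf
 * mapping at that level; table entries are left untouched so the walk
 * descends to the next level.
 */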
static int pageattr_pgd_entry(pgd_t *pgd, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pgd_t val = READ_ONCE(*pgd);

	if (pgd_leaf(val)) {
		val = __pgd(set_pageattr_masks(pgd_val(val), walk));
		set_pgd(pgd, val);
	}

	return 0;
}

static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	p4d_t val = READ_ONCE(*p4d);

	if (p4d_leaf(val)) {
		val = __p4d(set_pageattr_masks(p4d_val(val), walk));
		set_p4d(p4d, val);
	}

	return 0;
}

static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pud_t val = READ_ONCE(*pud);

	if (pud_leaf(val)) {
		val = __pud(set_pageattr_masks(pud_val(val), walk));
		set_pud(pud, val);
	}

	return 0;
}

static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pmd_t val = READ_ONCE(*pmd);

	if (pmd_leaf(val)) {
		val = __pmd(set_pageattr_masks(pmd_val(val), walk));
		set_pmd(pmd, val);
	}

	return 0;
}

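/* PTEs are always leaf entries, so every PTE in the range is rewritten. */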
static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pte_t val = READ_ONCE(*pte);

	val = __pte(set_pageattr_masks(pte_val(val), walk));
	set_pte(pte, val);

	return 0;
}

static int pageattr_pte_hole(unsigned long addr, unsigned long next,
			     int depth, struct mm_walk *walk)
{
	/* Nothing to do here */
	return 0;
}

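/* Callbacks used by the walk_page_range*() calls below. */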
static const struct mm_walk_ops pageattr_ops = {
	.pgd_entry = pageattr_pgd_entry,
	.p4d_entry = pageattr_p4d_entry,
	.pud_entry = pageattr_pud_entry,
	.pmd_entry = pageattr_pmd_entry,
	.pte_entry = pageattr_pte_entry,
	.pte_hole = pageattr_pte_hole,
};

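/*
 * Update the protection bits of the kernel mapping covering
 * [addr, addr + numpages * PAGE_SIZE) under init_mm's mmap lock, then
 * flush the affected TLB range.
 */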
static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
			pgprot_t clear_mask)
{
	int ret;
	unsigned long start = addr;
	unsigned long end = start + PAGE_SIZE * numpages;
	struct pageattr_masks masks = {
		.set_mask = set_mask,
		.clear_mask = clear_mask
	};

	if (!numpages)
		return 0;

	mmap_read_lock(&init_mm);
	ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
				    &masks);
	mmap_read_unlock(&init_mm);

	flush_tlb_kernel_range(start, end);

	return ret;
}

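/*
 * set_memory_{ro,rw,x,nx}() adjust the R/W/X bits of an existing
 * page-aligned kernel mapping. As an illustrative sketch only (not a
 * caller in this file), code that has filled a buffer with instructions
 * might do:
 *
 *	set_memory_ro(buf_addr, npages);
 *	set_memory_x(buf_addr, npages);
 *
 * where buf_addr and npages are hypothetical names for the page-aligned
 * buffer start and its size in pages.
 */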
int set_memory_ro(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ),
			    __pgprot(_PAGE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
			    __pgprot(0));
}

int set_memory_x(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_EXEC), __pgprot(0));
}

int set_memory_nx(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_EXEC));
}

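/*
 * Direct map helpers: set_direct_map_invalid_noflush() clears _PAGE_PRESENT
 * for one page of the kernel's linear mapping, and
 * set_direct_map_default_noflush() restores the default PAGE_KERNEL
 * protections. Neither flushes the TLB ("noflush"); callers are expected
 * to flush as needed.
 */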
int set_direct_map_invalid_noflush(struct page *page)
{
	int ret;
	unsigned long start = (unsigned long)page_address(page);
	unsigned long end = start + PAGE_SIZE;
	struct pageattr_masks masks = {
		.set_mask = __pgprot(0),
		.clear_mask = __pgprot(_PAGE_PRESENT)
	};

	mmap_read_lock(&init_mm);
	ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
	mmap_read_unlock(&init_mm);

	return ret;
}

int set_direct_map_default_noflush(struct page *page)
{
	int ret;
	unsigned long start = (unsigned long)page_address(page);
	unsigned long end = start + PAGE_SIZE;
	struct pageattr_masks masks = {
		.set_mask = PAGE_KERNEL,
		.clear_mask = __pgprot(0)
	};

	mmap_read_lock(&init_mm);
	ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
	mmap_read_unlock(&init_mm);

	return ret;
}

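/*
 * For debug_pagealloc: map (enable) or unmap (!enable) pages in the linear
 * mapping by toggling _PAGE_PRESENT, so that stray accesses to freed pages
 * fault instead of silently corrupting memory.
 */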
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!debug_pagealloc_enabled())
		return;

	if (enable)
		__set_memory((unsigned long)page_address(page), numpages,
			     __pgprot(_PAGE_PRESENT), __pgprot(0));
	else
		__set_memory((unsigned long)page_address(page), numpages,
			     __pgprot(0), __pgprot(_PAGE_PRESENT));
}