/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGALLOC_H
#define _ASM_RISCV_PGALLOC_H

#include <linux/mm.h>
#include <asm/tlb.h>

#ifdef CONFIG_MMU
#include <asm-generic/pgalloc.h>

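/*
 * The generic allocation helpers (pte_alloc_one(), pte_free(), pmd_free()
 * and friends) are picked up from <asm-generic/pgalloc.h> above; this
 * header only supplies the RISC-V specific hooks that install a
 * lower-level table into an entry and tear tables down again.
 *
 * pmd_populate_kernel() points a pmd entry at a kernel pte table: the
 * table's virtual address is converted to a PFN, shifted into the PPN
 * field and tagged with _PAGE_TABLE (valid, no R/W/X), which marks the
 * entry as a pointer to the next level of the page table.
 */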
static inline void pmd_populate_kernel(struct mm_struct *mm,
	pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = virt_to_pfn(pte);

	set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

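/*
 * Same as above, but for a user pte table handed over as a pgtable_t
 * (a struct page pointer on RISC-V), so page_address() is needed to
 * get at the table's virtual address first.  Typically reached from
 * generic mm code, e.g. __pte_alloc() in mm/memory.c, once a freshly
 * allocated pte page is ready to be wired into the page tables.
 */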
static inline void pmd_populate(struct mm_struct *mm,
	pmd_t *pmd, pgtable_t pte)
{
	unsigned long pfn = virt_to_pfn(page_address(pte));

	set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

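/*
 * On 64-bit kernels the pmd level is not folded, so pud_populate()
 * installs a pmd table into a pud entry using the same
 * PFN + _PAGE_TABLE encoding as the pmd helpers above.
 */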
#ifndef __PAGETABLE_PMD_FOLDED
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	unsigned long pfn = virt_to_pfn(pmd);

	set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}
#endif /* __PAGETABLE_PMD_FOLDED */

#define pmd_pgtable(pmd)	pmd_page(pmd)

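/*
 * Allocate a fresh page global directory for a new mm: the user slots
 * (the first USER_PTRS_PER_PGD entries) are cleared, and the kernel
 * slots are copied from init_mm so that every process shares the same
 * kernel mappings.  Typically reached via mm_alloc_pgd() when a new
 * mm is set up.
 */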
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
	if (likely(pgd != NULL)) {
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		/* Copy kernel mappings */
		memcpy(pgd + USER_PTRS_PER_PGD,
			init_mm.pgd + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return pgd;
}

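/*
 * mmu_gather hook for releasing a pmd table (64-bit only): this
 * version simply hands the table back through pmd_free() while the
 * gather operation is being torn down.
 */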
#ifndef __PAGETABLE_PMD_FOLDED

#define __pmd_free_tlb(tlb, pmd, addr)  pmd_free((tlb)->mm, pmd)

#endif /* __PAGETABLE_PMD_FOLDED */

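/*
 * mmu_gather hook for releasing a pte table: undo the page-table
 * constructor (ptlock/accounting) with pgtable_pte_page_dtor(), then
 * let tlb_remove_page() free the page once the TLB has been flushed
 * for the unmapped range.
 */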
#define __pte_free_tlb(tlb, pte, buf)		\
do {						\
	pgtable_pte_page_dtor(pte);		\
	tlb_remove_page((tlb), pte);		\
} while (0)
#endif /* CONFIG_MMU */

#endif /* _ASM_RISCV_PGALLOC_H */