^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) static struct kmem_cache *pgd_cachep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #if PAGETABLE_LEVELS > 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) static struct kmem_cache *pmd_cachep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) void pgd_ctor(void *x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) pgd_t *pgd = x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) memcpy(pgd + USER_PTRS_PER_PGD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) swapper_pg_dir + USER_PTRS_PER_PGD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) void pgtable_cache_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) pgd_cachep = kmem_cache_create("pgd_cache",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) PTRS_PER_PGD * (1<<PTE_MAGNITUDE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) PAGE_SIZE, SLAB_PANIC, pgd_ctor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #if PAGETABLE_LEVELS > 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) pmd_cachep = kmem_cache_create("pmd_cache",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) PTRS_PER_PMD * (1<<PTE_MAGNITUDE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) PAGE_SIZE, SLAB_PANIC, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) pgd_t *pgd_alloc(struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) return kmem_cache_alloc(pgd_cachep, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
/* Release a pgd previously obtained from pgd_alloc() back to its cache. */
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	kmem_cache_free(pgd_cachep, pgd);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #if PAGETABLE_LEVELS > 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) set_pud(pud, __pud((unsigned long)pmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) return kmem_cache_alloc(pmd_cachep, GFP_KERNEL | __GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
/* Release a pmd previously obtained from pmd_alloc_one() back to its cache. */
void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	kmem_cache_free(pmd_cachep, pmd);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #endif /* PAGETABLE_LEVELS > 2 */