/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_PGALLOC_H
#define _ASM_POWERPC_PGALLOC_H

#include <linux/mm.h>

#ifndef MODULE
static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp)
{
	if (unlikely(mm == &init_mm))
		return gfp;
	return gfp | __GFP_ACCOUNT;
}
#else /* !MODULE */
static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp)
{
	return gfp | __GFP_ACCOUNT;
}
#endif /* MODULE */

#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
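
/*
 * Illustrative sketch (not part of this header): callers typically
 * filter PGALLOC_GFP through pgtable_gfp_flags() so that user page
 * table pages are charged to the memcg while init_mm allocations are
 * not, along the lines of:
 *
 *	page = alloc_page(pgtable_gfp_flags(mm, PGALLOC_GFP));
 *
 * The real call sites live in the mm code, not in this header.
 */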

pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel);

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	return (pte_t *)pte_fragment_alloc(mm, 1);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	return (pgtable_t)pte_fragment_alloc(mm, 0);
}

void pte_frag_destroy(void *pte_frag);
void pte_fragment_free(unsigned long *table, int kernel);

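/*
 * The "kernel" argument passed to pte_fragment_free() must match the
 * one used at allocation time: pte_free_kernel() pairs with
 * pte_alloc_one_kernel(), and pte_free() pairs with pte_alloc_one().
 */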
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	pte_fragment_free((unsigned long *)pte, 1);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pte_fragment_free((unsigned long *)ptepage, 0);
}

/*
 * Functions that deal with pagetables that could be at any level of
 * the table need to be passed an "index_size" so they know how to
 * handle allocation.  For PTE pages, the allocation size will be
 * (2^index_size * sizeof(pointer)) and allocations are drawn from
 * the kmem_cache in PGT_CACHE(index_size).
 *
 * The maximum index size needs to be big enough to allow any
 * pagetable sizes we need, but small enough to fit in the low bits of
 * any page table pointer.  In other words all pagetables, even tiny
 * ones, must be aligned to allow at least enough low 0 bits to
 * contain this value.  This value is also used as a mask, so it must
 * be one less than a power of two.
 */
#define MAX_PGTABLE_INDEX_SIZE	0xf
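
/*
 * Illustrative sketch (not part of this header): the index size is
 * typically packed into the low bits of the table pointer and later
 * recovered with MAX_PGTABLE_INDEX_SIZE as a mask, roughly:
 *
 *	unsigned long pgf = (unsigned long)table | index_size;
 *	...
 *	unsigned int shift = pgf & MAX_PGTABLE_INDEX_SIZE;
 *	table = (void *)(pgf & ~(unsigned long)MAX_PGTABLE_INDEX_SIZE);
 *
 * The real packing/unpacking helpers live in the per-platform
 * pgalloc/pgtable code.
 */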

extern struct kmem_cache *pgtable_cache[];
#define PGT_CACHE(shift) pgtable_cache[shift]
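
/*
 * Illustrative sketch (not part of this header): higher-level tables
 * are typically carved out of the kmem_cache for their index size,
 * with the GFP flags filtered through pgtable_gfp_flags(), e.g.:
 *
 *	pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
 *			       pgtable_gfp_flags(mm, GFP_KERNEL));
 *
 * PGD_INDEX_SIZE here stands in for whatever index size the platform
 * defines for that level.
 */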

#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgalloc.h>
#else
#include <asm/nohash/pgalloc.h>
#endif

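/*
 * pmd_pgtable() returns the page table that a populated pmd points
 * to; pgtable_t is a pte fragment pointer on powerpc, so the PTE
 * page's virtual address can be handed back directly.
 */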
static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
	return (pgtable_t)pmd_page_vaddr(pmd);
}

#endif /* _ASM_POWERPC_PGALLOC_H */