^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) #ifndef __X86_MM_INTERNAL_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) #define __X86_MM_INTERNAL_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) void *alloc_low_pages(unsigned int num);
/*
 * Allocate a single page via alloc_low_pages().
 *
 * Thin convenience wrapper for the common one-page case; all allocation
 * semantics (source of the pages, lifetime, failure behavior) are those
 * of alloc_low_pages() — see its definition for details.
 */
static inline void *alloc_low_page(void)
{
	return alloc_low_pages(1);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) void early_ioremap_page_table_range_init(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) unsigned long kernel_physical_mapping_init(unsigned long start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) unsigned long end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) unsigned long page_size_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) pgprot_t prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) unsigned long kernel_physical_mapping_change(unsigned long start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) unsigned long end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) unsigned long page_size_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) void zone_sizes_init(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) extern int after_bootmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) extern unsigned long tlb_single_page_flush_ceiling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #endif /* __X86_MM_INTERNAL_H */