/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _H8300_PGTABLE_H
#define _H8300_PGTABLE_H
#include <asm-generic/pgtable-nopud.h>
extern void paging_init(void);
#define PAGE_NONE __pgprot(0) /* these mean nothing to NO_MM */
#define PAGE_SHARED __pgprot(0) /* these mean nothing to NO_MM */
#define PAGE_COPY __pgprot(0) /* these mean nothing to NO_MM */
#define PAGE_READONLY __pgprot(0) /* these mean nothing to NO_MM */
#define PAGE_KERNEL __pgprot(0) /* these mean nothing to NO_MM */
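/*
 * Swap-entry helpers exist only to satisfy generic code: a no-MMU
 * kernel never swaps, so the type is always 0 and the offset packed
 * above bit 7 is never consulted.
 */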
#define __swp_type(x) (0)
#define __swp_offset(x) (0)
#define __swp_entry(typ, off) ((swp_entry_t) { ((typ) | ((off) << 7)) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
#define kern_addr_valid(addr) (1)
#define pgprot_writecombine(prot) (prot)
#define pgprot_noncached pgprot_writecombine

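/*
 * No page tables exist without an MMU: pte_file() can never be true,
 * and swapper_pg_dir is only a placeholder for generic code.
 */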
static inline int pte_file(pte_t pte) { return 0; }
#define swapper_pg_dir ((pgd_t *) 0)
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
#define ZERO_PAGE(vaddr) (virt_to_page(0))

/*
 * These would be in other places but having them here reduces the diffs.
 */
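/*
 * kobjsize() reports the size of the kernel object at objp (used by the
 * no-MMU mm code); is_in_rom() tells whether an address falls inside
 * the read-only ROM region.
 */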
extern unsigned int kobjsize(const void *objp);
extern int is_in_rom(unsigned long);

/*
 * All 32bit addresses are effectively valid for vmalloc...
 * Sort of meaningless for non-VM targets.
 */
#define VMALLOC_START 0
#define VMALLOC_END 0xffffffff

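/*
 * Lazy-mode batching has nothing to do on this target, so the generic
 * hook collapses to a no-op.
 */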
#define arch_enter_lazy_cpu_mode() do {} while (0)

#endif /* _H8300_PGTABLE_H */