/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_EARLY_IOREMAP_H_
#define _ASM_EARLY_IOREMAP_H_

#include <linux/types.h>

/*
 * early_ioremap() and early_iounmap() are for temporary early boot-time
 * mappings, before the real ioremap() is functional.
 */
extern void __iomem *early_ioremap(resource_size_t phys_addr,
				   unsigned long size);
extern void *early_memremap(resource_size_t phys_addr,
			    unsigned long size);
extern void *early_memremap_ro(resource_size_t phys_addr,
			       unsigned long size);
extern void *early_memremap_prot(resource_size_t phys_addr,
				 unsigned long size, unsigned long prot_val);
extern void early_iounmap(void __iomem *addr, unsigned long size);
extern void early_memunmap(void *addr, unsigned long size);
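
/*
 * Illustrative usage sketch (not part of this header): a typical early
 * boot caller maps a physical range, consumes it, and unmaps it again
 * before the normal ioremap()/memremap() machinery is up. The names
 * fdt_phys, fdt_size and parse_boot_params() below are hypothetical:
 *
 *	void *fdt = early_memremap(fdt_phys, fdt_size);
 *	if (fdt) {
 *		parse_boot_params(fdt);		// hypothetical consumer
 *		early_memunmap(fdt, fdt_size);
 *	}
 */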

/*
 * Weak function called by early_ioremap_reset(). It does nothing, but
 * architectures may provide their own version to do any needed cleanups.
 */
extern void early_ioremap_shutdown(void);
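
/*
 * Sketch of an architecture override (hypothetical): because the
 * generic definition is weak, an arch may supply a strong version
 * that runs when early_ioremap_reset() is called, for example:
 *
 *	void early_ioremap_shutdown(void)
 *	{
 *		arch_teardown_early_fixmaps();	// hypothetical helper
 *	}
 */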

#if defined(CONFIG_GENERIC_EARLY_IOREMAP) && defined(CONFIG_MMU)
/* Arch-specific initialization */
extern void early_ioremap_init(void);

/* Generic initialization called by architecture code */
extern void early_ioremap_setup(void);
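
/*
 * Sketch (an assumption, not mandated by this header): an architecture
 * with no extra setup of its own can simply forward its early init
 * hook to the generic initialization:
 *
 *	void __init early_ioremap_init(void)
 *	{
 *		early_ioremap_setup();
 *	}
 */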

/*
 * Called as the last step in paging_init() so the library can act
 * accordingly for subsequent map/unmap requests.
 */
extern void early_ioremap_reset(void);
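
/*
 * Sketch of the intended call site (hypothetical arch code): once the
 * final page tables are live, paging_init() tells the library that
 * later mappings can go through the normal paths:
 *
 *	void __init paging_init(void)
 *	{
 *		// ... set up the real page tables ...
 *		early_ioremap_reset();	// must be the last step
 *	}
 */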

/*
 * Early copy from unmapped physical memory to kernel-mapped memory.
 */
extern void copy_from_early_mem(void *dest, phys_addr_t src,
				unsigned long size);
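
/*
 * Illustrative use (hypothetical names): copy a boot blob, whose
 * physical address blob_phys came from the bootloader, into an
 * already-mapped kernel buffer before the normal memremap() path
 * is available:
 *
 *	copy_from_early_mem(dest_buf, blob_phys, blob_size);
 */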

#else
static inline void early_ioremap_init(void) { }
static inline void early_ioremap_setup(void) { }
static inline void early_ioremap_reset(void) { }
#endif

#endif /* _ASM_EARLY_IOREMAP_H_ */