// SPDX-License-Identifier: GPL-2.0
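/*
 * Runtime checking of virtual to physical address conversions
 * (CONFIG_DEBUG_VIRTUAL): debug-checked variants that back
 * __pa()/virt_to_phys() and __pa_symbol() when the option is enabled.
 */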
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mmdebug.h>
#include <linux/mm.h>

#include <asm/sections.h>
#include <asm/memory.h>
#include <asm/fixmap.h>
#include <asm/dma.h>

#include "mm.h"

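/*
 * Return true if @x is a kernel virtual address for which a
 * __pa()/virt_to_phys() translation is meaningful.
 */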
static inline bool __virt_addr_valid(unsigned long x)
{
	/*
	 * high_memory does not get immediately defined, and there
	 * are early callers of __pa() against PAGE_OFFSET
	 */
	if (!high_memory && x >= PAGE_OFFSET)
		return true;

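	/* Normal case: the address lies within the lowmem linear mapping. */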
	if (high_memory && x >= PAGE_OFFSET && x < (unsigned long)high_memory)
		return true;

	/*
	 * MAX_DMA_ADDRESS is a virtual address that may not correspond to an
	 * actual physical address. Enough code relies on __pa(MAX_DMA_ADDRESS)
	 * that we just need to work around it and always return true.
	 */
	if (x == (unsigned long)MAX_DMA_ADDRESS)
		return true;

	return false;
}

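/*
 * Debug-checked counterpart of __virt_to_phys_nodebug(): warn when the
 * address is not a valid linear-map address, then perform the normal
 * translation anyway so callers keep working.
 */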
phys_addr_t __virt_to_phys(unsigned long x)
{
	WARN(!__virt_addr_valid(x),
	     "virt_to_phys used for non-linear address: %pK (%pS)\n",
	     (void *)x, (void *)x);

	return __virt_to_phys_nodebug(x);
}
EXPORT_SYMBOL(__virt_to_phys);

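/*
 * Debug-checked counterpart of __pa_symbol_nodebug(): VIRTUAL_BUG_ON()
 * fires if the address falls outside the kernel image.
 */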
phys_addr_t __phys_addr_symbol(unsigned long x)
{
	/* This is bounds checking against the kernel image only.
	 * __pa_symbol should only be used on kernel symbol addresses.
	 */
	VIRTUAL_BUG_ON(x < (unsigned long)KERNEL_START ||
		       x > (unsigned long)KERNEL_END);

	return __pa_symbol_nodebug(x);
}
EXPORT_SYMBOL(__phys_addr_symbol);