/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/device.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/mm.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
	return ioremap(offset, size);
}
#endif

#ifndef arch_memremap_wb
static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
{
	return (__force void *)ioremap_cache(offset, size);
}
#endif

#ifndef arch_memremap_can_ram_remap
static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
					unsigned long flags)
{
	return true;
}
#endif

static void *try_ram_remap(resource_size_t offset, size_t size,
			   unsigned long flags)
{
	unsigned long pfn = PHYS_PFN(offset);

	/* In the simple case just return the existing linear address */
	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)) &&
	    arch_memremap_can_ram_remap(offset, size, flags))
		return __va(offset);

	return NULL; /* fallback to arch_memremap_wb */
}

/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: any of MEMREMAP_WB, MEMREMAP_WT, MEMREMAP_WC,
 *	   MEMREMAP_ENC, MEMREMAP_DEC
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable. In the case of multiple flags, the different
 * mapping types will be attempted in the order listed below until one of
 * them succeeds.
 *
 * MEMREMAP_WB - matches the default mapping for System RAM on
 * the architecture. This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is
 * RAM, memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility. Attempts to
 * map System RAM with this mapping type will fail.
 *
 * MEMREMAP_WC - establish a write-combining mapping, whereby writes may
 * be coalesced together (e.g. in the CPU's write buffers), but the mapping
 * is otherwise uncached. Attempts to map System RAM with this mapping type
 * will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	int is_ram = region_intersects(offset, size,
				       IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
	void *addr = NULL;

	if (!flags)
		return NULL;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	/* Try all mapping types requested until one returns non-NULL */
	if (flags & MEMREMAP_WB) {
		/*
		 * MEMREMAP_WB is special in that it can be satisfied
		 * from the direct map. Some archs depend on the
		 * capability of memremap() to autodetect cases where
		 * the requested range is potentially in System RAM.
		 */
		if (is_ram == REGION_INTERSECTS)
			addr = try_ram_remap(offset, size, flags);
		if (!addr)
			addr = arch_memremap_wb(offset, size);
	}

	/*
	 * If we don't have a mapping yet and other request flags are
	 * present then we will be attempting to establish a new virtual
	 * address mapping. Enforce that this mapping is not aliasing
	 * System RAM.
	 */
	if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	if (!addr && (flags & MEMREMAP_WT))
		addr = ioremap_wt(offset, size);

	if (!addr && (flags & MEMREMAP_WC))
		addr = ioremap_wc(offset, size);

	return addr;
}
EXPORT_SYMBOL(memremap);

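/**
 * memunmap() - unmap a pointer obtained from memremap()
 * @addr: pointer previously returned by memremap()
 *
 * Pointers into the direct map (the MEMREMAP_WB-on-RAM case) are left
 * alone; only mappings that were actually established via an ioremap
 * variant are torn down.
 */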
void memunmap(void *addr)
{
	if (is_ioremap_addr(addr))
		iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);
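
/*
 * Illustrative sketch only, not part of this file's API: map a region that
 * is known to be free of i/o side effects, read from it with plain loads,
 * then release the mapping. The physical address and size are hypothetical.
 * Passing MEMREMAP_WB | MEMREMAP_WT asks for a cacheable mapping first,
 * falling back to write-through if that fails, per the ordering documented
 * above memremap().
 */
static void __maybe_unused memremap_usage_example(void)
{
	void *tbl;

	tbl = memremap(0x80000000, 0x1000, MEMREMAP_WB | MEMREMAP_WT);
	if (!tbl)
		return;

	/* No readb()/writeb() accessors needed; this is ordinary memory. */
	pr_info("first byte: %#x\n", *(u8 *)tbl);

	memunmap(tbl);
}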

static void devm_memremap_release(struct device *dev, void *res)
{
	memunmap(*(void **)res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

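/**
 * devm_memremap() - device-managed memremap()
 * @dev: device that owns the mapping
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: same flags as memremap()
 *
 * Returns the mapped address, or an ERR_PTR() on failure (note: not NULL,
 * unlike memremap()). The mapping is released automatically when @dev is
 * unbound, or earlier via devm_memunmap().
 */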
void *devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	void **ptr, *addr;

	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
			dev_to_node(dev));
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	addr = memremap(offset, size, flags);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
		return ERR_PTR(-ENXIO);
	}

	return addr;
}
EXPORT_SYMBOL(devm_memremap);

void devm_memunmap(struct device *dev, void *addr)
{
	WARN_ON(devres_release(dev, devm_memremap_release,
				devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);
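
/*
 * Illustrative sketch only: a probe-style routine using the device-managed
 * variant. The resource values are hypothetical, and 'example_probe' is not
 * a real driver entry point. Because the mapping is tied to @dev via devres,
 * no explicit memunmap()/devm_memunmap() call is needed on the teardown path.
 */
static int __maybe_unused example_probe(struct device *dev,
					resource_size_t start, size_t len)
{
	void *base;

	base = devm_memremap(dev, start, len, MEMREMAP_WB);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* ERR_PTR, not NULL, on failure */

	/* use 'base' as ordinary memory for the lifetime of the device */
	return 0;
}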