^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * OMAP4 specific common source file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2010 Texas Instruments, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Author:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Santosh Shilimkar <santosh.shilimkar@ti.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/irqchip.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/memblock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/of_irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/of_platform.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/irqchip/arm-gic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/of_address.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/reboot.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/genalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <asm/hardware/cache-l2x0.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <asm/mach/map.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <asm/memblock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <asm/smp_twd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include "omap-wakeupgen.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include "soc.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include "iomap.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include "common.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include "prminst44xx.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include "prcm_mpu44xx.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include "omap4-sar-layout.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include "omap-secure.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include "sram.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #ifdef CONFIG_CACHE_L2X0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) static void __iomem *l2cache_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) static void __iomem *sar_ram_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) static void __iomem *gic_dist_base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) static void __iomem *twd_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #define IRQ_LOCALTIMER 29
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) /* Used to implement memory barrier on DRAM path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #define OMAP4_DRAM_BARRIER_VA 0xfe600000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) static void __iomem *dram_sync, *sram_sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) static phys_addr_t dram_sync_paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) static u32 dram_sync_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) * The OMAP4 bus structure contains asynchronous bridges which can buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) * data writes from the MPU. These asynchronous bridges can be found on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) * paths between the MPU to EMIF, and the MPU to L3 interconnects.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) * We need to be careful about re-ordering which can happen as a result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) * of different accesses being performed via different paths, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) * therefore different asynchronous bridges.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) * OMAP4 interconnect barrier which is called for each mb() and wmb().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) * This is to ensure that normal paths to DRAM (normal memory, cacheable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) * accesses) are properly synchronised with writes to DMA coherent memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) * (normal memory, uncacheable) and device writes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) *
 * The mb() and wmb() barriers operate only on the MPU->MA->EMIF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) * path, as we need to ensure that data is visible to other system
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) * masters prior to writes to those system masters being seen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) * Note: the SRAM path is not synchronised via mb() and wmb().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) static void omap4_mb(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) if (dram_sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) writel_relaxed(0, dram_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) * OMAP4 Errata i688 - asynchronous bridge corruption when entering WFI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) *
 * If data is stalled inside an asynchronous bridge because of back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) * pressure, it may be accepted multiple times, creating pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) * misalignment that will corrupt next transfers on that data path until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) * next reset of the system. No recovery procedure once the issue is hit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) * the path remains consistently broken.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) * Async bridges can be found on paths between MPU to EMIF and MPU to L3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) * interconnects.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) * This situation can happen only when the idle is initiated by a Master
 * Request Disconnection (which is triggered by software when executing WFI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) * on the CPU).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) * The work-around for this errata needs all the initiators connected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) * through an async bridge to ensure that data path is properly drained
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) * before issuing WFI. This condition will be met if one Strongly ordered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) * access is performed to the target right before executing the WFI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) * In MPU case, L3 T2ASYNC FIFO and DDR T2ASYNC FIFO needs to be drained.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) * IO barrier ensure that there is no synchronisation loss on initiators
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) * operating on both interconnect port simultaneously.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) * This is a stronger version of the OMAP4 memory barrier below, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) * operates on both the MPU->MA->EMIF path but also the MPU->OCP path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) * as well, and is necessary prior to executing a WFI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) void omap_interconnect_sync(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) if (dram_sync && sram_sync) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) writel_relaxed(readl_relaxed(dram_sync), dram_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) writel_relaxed(readl_relaxed(sram_sync), sram_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) isb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) static int __init omap4_sram_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) struct device_node *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) struct gen_pool *sram_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) if (!soc_is_omap44xx() && !soc_is_omap54xx())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) if (!np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) pr_warn("%s:Unable to allocate sram needed to handle errata I688\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) sram_pool = of_gen_pool_get(np, "sram", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) if (!sram_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) pr_warn("%s:Unable to get sram pool needed to handle errata I688\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) sram_sync = (void *)gen_pool_alloc(sram_pool, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) omap_arch_initcall(omap4_sram_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) /* Steal one page physical memory for barrier implementation */
/* Steal one page physical memory for barrier implementation */
void __init omap_barrier_reserve_memblock(void)
{
	/*
	 * Size and alignment are rounded to 1MiB — presumably so the
	 * MT_MEMORY_RW_SO mapping created later in omap_barriers_init()
	 * can use a section mapping; TODO confirm against iotable_init()
	 * requirements. Must run at memblock-steal time (early boot).
	 */
	dram_sync_size = ALIGN(PAGE_SIZE, SZ_1M);
	dram_sync_paddr = arm_memblock_steal(dram_sync_size, SZ_1M);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) void __init omap_barriers_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) struct map_desc dram_io_desc[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) dram_io_desc[0].pfn = __phys_to_pfn(dram_sync_paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) dram_io_desc[0].length = dram_sync_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) dram_io_desc[0].type = MT_MEMORY_RW_SO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) iotable_init(dram_io_desc, ARRAY_SIZE(dram_io_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) dram_sync = (void __iomem *) dram_io_desc[0].virtual;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) pr_info("OMAP4: Map %pa to %p for dram barrier\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) &dram_sync_paddr, dram_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) soc_mb = omap4_mb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) void gic_dist_disable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) if (gic_dist_base_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) writel_relaxed(0x0, gic_dist_base_addr + GIC_DIST_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) void gic_dist_enable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) if (gic_dist_base_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) writel_relaxed(0x1, gic_dist_base_addr + GIC_DIST_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185)
/*
 * Return true when the GIC distributor enable bit is clear.
 *
 * NOTE(review): unlike gic_dist_enable()/gic_dist_disable() above,
 * this does not NULL-check gic_dist_base_addr — callers must only use
 * it on the OMAP4460 path where omap_gic_of_init() mapped the base;
 * verify against callers.
 */
bool gic_dist_disabled(void)
{
	return !(readl_relaxed(gic_dist_base_addr + GIC_DIST_CTRL) & 0x1);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190)
/*
 * OMAP4460 ROM errata workaround (see omap_gic_of_init()): a TWD local
 * timer interrupt that fired while the GIC distributor was disabled is
 * latched in the TWD but never becomes pending in the GIC. Detect that
 * state and recover it so the timer event is not lost.
 */
void gic_timer_retrigger(void)
{
	u32 twd_int = readl_relaxed(twd_base + TWD_TIMER_INTSTAT);
	u32 gic_int = readl_relaxed(gic_dist_base_addr + GIC_DIST_PENDING_SET);
	u32 twd_ctrl = readl_relaxed(twd_base + TWD_TIMER_CONTROL);

	/* TWD says "expired" but the GIC never saw IRQ 29 (local timer) */
	if (twd_int && !(gic_int & BIT(IRQ_LOCALTIMER))) {
		/*
		 * The local timer interrupt got lost while the distributor was
		 * disabled. Ack the pending interrupt, and retrigger it.
		 */
		pr_warn("%s: lost localtimer interrupt\n", __func__);
		writel_relaxed(1, twd_base + TWD_TIMER_INTSTAT);
		if (!(twd_ctrl & TWD_TIMER_CONTROL_PERIODIC)) {
			/*
			 * One-shot mode: reload a minimal count and re-enable
			 * so the timer expires again almost immediately.
			 * (Periodic mode reloads itself, so nothing to do.)
			 */
			writel_relaxed(1, twd_base + TWD_TIMER_COUNTER);
			twd_ctrl |= TWD_TIMER_CONTROL_ENABLE;
			writel_relaxed(twd_ctrl, twd_base + TWD_TIMER_CONTROL);
		}
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) #ifdef CONFIG_CACHE_L2X0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213)
/* Return the ioremapped L2C-310 base set up by omap_l2_cache_init()
 * (NULL if that init has not run or failed). */
void __iomem *omap4_get_l2cache_base(void)
{
	return l2cache_base;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218)
/*
 * Secure-side write hook for the L2C-310: on OMAP4 the controller's
 * control registers can only be written from the secure world, so map
 * each register to its ROM monitor SMC index and issue omap_smc1().
 * Unsupported registers are reported and dropped rather than written.
 */
void omap4_l2c310_write_sec(unsigned long val, unsigned reg)
{
	unsigned smc_op;

	switch (reg) {
	case L2X0_CTRL:
		smc_op = OMAP4_MON_L2X0_CTRL_INDEX;
		break;

	case L2X0_AUX_CTRL:
		smc_op = OMAP4_MON_L2X0_AUXCTRL_INDEX;
		break;

	case L2X0_DEBUG_CTRL:
		smc_op = OMAP4_MON_L2X0_DBG_CTRL_INDEX;
		break;

	case L310_PREFETCH_CTRL:
		smc_op = OMAP4_MON_L2X0_PREFETCH_INDEX;
		break;

	case L310_POWER_CTRL:
		/* Known-unsupported: warn once, silently ignore thereafter */
		pr_info_once("OMAP L2C310: ROM does not support power control setting\n");
		return;

	default:
		WARN_ONCE(1, "OMAP L2C310: ignoring write to reg 0x%x\n", reg);
		return;
	}

	omap_smc1(smc_op, val);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251)
/*
 * Map the L2C-310 register space for later use by the secure write
 * hook and omap4_get_l2cache_base().
 *
 * Returns 0 on success, -ENOMEM if the ioremap fails.
 */
int __init omap_l2_cache_init(void)
{
	/* Static mapping, never released */
	l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K);
	if (WARN_ON(!l2cache_base))
		return -ENOMEM;
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261)
/* Return the ioremapped SAR RAM base set up by omap4_sar_ram_init()
 * (NULL on unsupported SoCs or if the mapping failed). */
void __iomem *omap4_get_sar_ram_base(void)
{
	return sar_ram_base;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) * SAR RAM used to save and restore the HW context in low power modes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) * Note that we need to initialize this very early for kexec. See
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) * omap4_mpuss_early_init().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) */
void __init omap4_sar_ram_init(void)
{
	unsigned long sar_base;

	/*
	 * To avoid code running on other OMAPs in
	 * multi-omap builds
	 */
	if (cpu_is_omap44xx())
		sar_base = OMAP44XX_SAR_RAM_BASE;
	else if (soc_is_omap54xx())
		sar_base = OMAP54XX_SAR_RAM_BASE;
	else
		return;

	/* Static mapping, never released */
	sar_ram_base = ioremap(sar_base, SZ_16K);
	/*
	 * NOTE(review): the return after WARN_ON is redundant at the end
	 * of a void function; kept for byte-identity. Failure leaves
	 * sar_ram_base NULL, which omap4_get_sar_ram_base() callers must
	 * tolerate.
	 */
	if (WARN_ON(!sar_ram_base))
		return;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) static const struct of_device_id intc_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) { .compatible = "ti,omap4-wugen-mpu", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) { .compatible = "ti,omap5-wugen-mpu", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) static struct device_node *intc_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) void __init omap_gic_of_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) struct device_node *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) intc_node = of_find_matching_node(NULL, intc_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) if (WARN_ON(!intc_node)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) pr_err("No WUGEN found in DT, system will misbehave.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) pr_err("UPDATE YOUR DEVICE TREE!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) /* Extract GIC distributor and TWD bases for OMAP4460 ROM Errata WA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) if (!cpu_is_omap446x())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) goto skip_errata_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-gic");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) gic_dist_base_addr = of_iomap(np, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) WARN_ON(!gic_dist_base_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-twd-timer");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) twd_base = of_iomap(np, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) WARN_ON(!twd_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) skip_errata_init:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) irqchip_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) }