^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * arch/arm/mm/cache-l2x0.c - L210/L220/L310 cache controller support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2007 ARM Limited
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/log2.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/of_address.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <asm/cacheflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <asm/cp15.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <asm/cputype.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <asm/hardware/cache-l2x0.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <asm/hardware/cache-aurora-l2.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include "cache-tauros3.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23)
/*
 * Per-controller description: identifies the cache type and supplies the
 * operations used to parse, configure, unlock and enable it, plus the
 * outer_cache method table to install.
 */
struct l2c_init_data {
	const char *type;	/* human-readable controller name */
	unsigned way_size_0;	/* base way size; presumably the size for aux-ctrl encoding 0 — confirm against init code */
	unsigned num_lock;	/* number of lockdown register sets to clear */
	void (*of_parse)(const struct device_node *, u32 *, u32 *);	/* parse DT-provided aux-ctrl value/mask */
	void (*enable)(void __iomem *, unsigned);	/* enable the (currently disabled) controller */
	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);	/* revision-specific method fixups */
	void (*save)(void __iomem *);		/* save register state for suspend/resume */
	void (*configure)(void __iomem *);	/* program saved register state into the hardware */
	void (*unlock)(void __iomem *, unsigned);	/* clear way-lockdown registers */
	struct outer_cache_fns outer_cache;	/* methods installed as the outer cache */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
#define CACHE_LINE_SIZE 32

static void __iomem *l2x0_base;			/* MMIO base of the controller */
static const struct l2c_init_data *l2x0_data;	/* description of the probed cache */
static DEFINE_RAW_SPINLOCK(l2x0_lock);		/* serialises maintenance ops (L2C-220 paths) */
static u32 l2x0_way_mask; /* Bitmask of active ways */
static u32 l2x0_size;	/* cache size in bytes (range ops fall back to way ops at this size) */
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;	/* sync register offset; kept variable so L2C-310 code can share the L2C-210 paths */

/* Register state saved across suspend and restored by the configure hooks. */
struct l2x0_regs l2x0_saved_regs;

/* NOTE(review): presumably DT/cmdline overrides disabling early BRESP and
 * full-line-of-zeros on L2C-310 — confirm where these are set. */
static bool l2x0_bresp_disable;
static bool l2x0_flz_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) * Common code for all cache controllers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) /* wait for cache operation by line or way to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) while (readl_relaxed(reg) & mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) * By default, we write directly to secure registers. Platforms must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) * override this if they are running non-secure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) if (val == readl_relaxed(base + reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) if (outer_cache.write_sec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) outer_cache.write_sec(val, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) writel_relaxed(val, base + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) * This should only be called when we have a requirement that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) * register be written due to a work-around, as platforms running
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) * in non-secure mode may not be able to access this register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	/* Debug control writes must go through the secure write path. */
	l2c_write_sec(val, base, L2X0_DEBUG_CTRL);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84)
/*
 * Start an operation on all active ways and wait until the controller
 * clears the way bits, indicating completion.
 */
static void __l2c_op_way(void __iomem *reg)
{
	writel_relaxed(l2x0_way_mask, reg);
	l2c_wait_mask(reg, l2x0_way_mask);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) static inline void l2c_unlock(void __iomem *base, unsigned num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) for (i = 0; i < num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) i * L2X0_LOCKDOWN_STRIDE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) i * L2X0_LOCKDOWN_STRIDE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
/* Default configure hook: restore the saved auxiliary control register. */
static void l2c_configure(void __iomem *base)
{
	l2c_write_sec(l2x0_saved_regs.aux_ctrl, base, L2X0_AUX_CTRL);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) * Enable the L2 cache controller. This function must only be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) * called when the cache controller is known to be disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) */
/*
 * Enable the L2 cache controller. This function must only be
 * called when the cache controller is known to be disabled.
 */
static void l2c_enable(void __iomem *base, unsigned num_lock)
{
	unsigned long flags;

	/* Prefer a platform-installed configure hook over the default. */
	if (outer_cache.configure)
		outer_cache.configure(&l2x0_saved_regs);
	else
		l2x0_data->configure(base);

	l2x0_data->unlock(base, num_lock);

	/* Invalidate all ways and drain with a sync before enabling,
	 * with interrupts off so nothing races the sequence. */
	local_irq_save(flags);
	__l2c_op_way(base + L2X0_INV_WAY);
	writel_relaxed(0, base + sync_reg_offset);
	l2c_wait_mask(base + sync_reg_offset, 1);
	local_irq_restore(flags);

	l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)
/*
 * Disable the controller: quiesce the PMU, flush everything out to
 * memory, then clear the enable bit and order the disable with dsb.
 */
static void l2c_disable(void)
{
	void __iomem *base = l2x0_base;

	l2x0_pmu_suspend();

	outer_cache.flush_all();
	l2c_write_sec(0, base, L2X0_CTRL);
	dsb(st);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) static void l2c_save(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) static void l2c_resume(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) void __iomem *base = l2x0_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) /* Do not touch the controller if already enabled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) l2c_enable(base, l2x0_data->num_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) l2x0_pmu_resume();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) * L2C-210 specific code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) * The L2C-2x0 PA, set/way and sync operations are atomic, but we must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) * ensure that no background operation is running. The way operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) * are all background tasks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) * While a background operation is in progress, any new operation is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) * ignored (unspecified whether this causes an error.) Thankfully, not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) * used on SMP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) * Never has a different sync register other than L2X0_CACHE_SYNC, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) * we use sync_reg_offset here so we can share some of this with L2C-310.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) */
/* Issue a cache sync; atomic on L2C-210/310, so no completion wait. */
static void __l2c210_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + sync_reg_offset);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) while (start < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) writel_relaxed(start, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) start += CACHE_LINE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)
/*
 * Invalidate [start, end) by physical address.  A partial line at either
 * boundary may hold dirty data belonging to something else, so it is
 * cleaned+invalidated rather than discarded outright.
 */
static void l2c210_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
	}

	/* Invalidate the whole lines in between, then sync. */
	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) static void l2c210_clean_range(unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) void __iomem *base = l2x0_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) start &= ~(CACHE_LINE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) __l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) __l2c210_cache_sync(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) static void l2c210_flush_range(unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) void __iomem *base = l2x0_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) start &= ~(CACHE_LINE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) __l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) __l2c210_cache_sync(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)
/*
 * Clean and invalidate all ways.  The way operation is a background task
 * on this hardware, so interrupts must already be disabled to prevent a
 * conflicting operation being issued meanwhile.
 */
static void l2c210_flush_all(void)
{
	void __iomem *base = l2x0_base;

	BUG_ON(!irqs_disabled());

	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	__l2c210_cache_sync(base);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233)
/* outer_cache sync hook: issue an atomic cache sync. */
static void l2c210_sync(void)
{
	__l2c210_cache_sync(l2x0_base);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238)
/* L2C-210 description: PA/sync ops are atomic (see comment above), so the
 * generic enable/save/configure helpers are used directly. */
static const struct l2c_init_data l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c_enable,
	.save = l2c_save,
	.configure = l2c_configure,
	.unlock = l2c_unlock,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c_resume,
	},
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) * L2C-220 specific code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) * All operations are background operations: they have to be waited for.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) * Conflicting requests generate a slave error (which will cause an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) * imprecise abort.) Never uses sync_reg_offset, so we hard-code the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) * sync register here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) *
 * However, we can re-use the l2c_resume call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) */
/* Issue a cache sync and wait for it: on L2C-220 sync is a background
 * operation, unlike L2C-210/310. */
static inline void __l2c220_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
	l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273)
/*
 * Run a way operation followed by a sync, under l2x0_lock: conflicting
 * requests on L2C-220 cause a slave error, so maintenance must be
 * serialised.
 */
static void l2c220_op_way(void __iomem *base, unsigned reg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + reg);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283)
/*
 * Issue the line operation at @reg for each cache line in [start, end),
 * waiting for the previous background operation before every write.
 * Called with l2x0_lock held and irqs off; the lock is dropped and
 * re-taken between 4K blocks to bound interrupt latency.  Returns the
 * (possibly refreshed) irq flags for the caller's final unlock.
 */
static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end, unsigned long flags)
{
	raw_spinlock_t *lock = &l2x0_lock;

	while (start < end) {
		/* Work on at most 4K before briefly releasing the lock. */
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2c_wait_mask(reg, 1);
			writel_relaxed(start, reg);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}

	return flags;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306)
/*
 * Invalidate [start, end) by physical address under l2x0_lock.  Partial
 * lines at either boundary may hold dirty data belonging to something
 * else, so they are cleaned+invalidated rather than discarded.
 */
static void l2c220_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			/* Wait for the previous background op to finish. */
			l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
			writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
		}
	}

	/* Invalidate the whole lines in between, then drain and sync. */
	flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333)
/*
 * Clean [start, end) by physical address; ranges covering the whole
 * cache fall back to the cheaper clean-by-way operation.
 */
static void l2c220_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
				   start, end, flags);
	/*
	 * NOTE(review): the range loop above issues ops on
	 * L2X0_CLEAN_LINE_PA, yet this waits on L2X0_CLEAN_INV_LINE_PA —
	 * confirm whether the wait should target the clean register.
	 */
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352)
/*
 * Clean and invalidate [start, end) by physical address; ranges covering
 * the whole cache fall back to clean+invalidate by way.
 */
static void l2c220_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
				   start, end, flags);
	/* Drain the last line op, then sync, before dropping the lock. */
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371)
/* Clean and invalidate the entire cache by way, serialised + synced. */
static void l2c220_flush_all(void)
{
	l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376)
/* outer_cache sync hook: sync under l2x0_lock since it is a background
 * operation on L2C-220. */
static void l2c220_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c220_cache_sync(l2x0_base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385)
static void l2c220_enable(void __iomem *base, unsigned num_lock)
{
	/*
	 * Always enable non-secure access to the lockdown registers -
	 * we write to them as part of the L2C enable sequence so they
	 * need to be accessible.
	 */
	l2x0_saved_regs.aux_ctrl |= L220_AUX_CTRL_NS_LOCKDOWN;

	l2c_enable(base, num_lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) static void l2c220_unlock(void __iomem *base, unsigned num_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) if (readl_relaxed(base + L2X0_AUX_CTRL) & L220_AUX_CTRL_NS_LOCKDOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) l2c_unlock(base, num_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) static const struct l2c_init_data l2c220_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) .type = "L2C-220",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) .way_size_0 = SZ_8K,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) .num_lock = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) .enable = l2c220_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) .save = l2c_save,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) .configure = l2c_configure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) .unlock = l2c220_unlock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) .outer_cache = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) .inv_range = l2c220_inv_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) .clean_range = l2c220_clean_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) .flush_range = l2c220_flush_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) .flush_all = l2c220_flush_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) .disable = l2c_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) .sync = l2c220_sync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) .resume = l2c_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) * L2C-310 specific code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) * and the way operations are all background tasks. However, issuing an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) * operation while a background operation is in progress results in a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) * SLVERR response. We can reuse:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) * __l2c210_cache_sync (using sync_reg_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) * l2c210_sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) * l2c210_inv_range (if 588369 is not applicable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) * l2c210_clean_range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) * l2c210_flush_range (if 588369 is not applicable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) * l2c210_flush_all (if 727915 is not applicable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) * Errata:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) * 588369: PL310 R0P0->R1P0, fixed R2P0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) * Affects: all clean+invalidate operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) * clean and invalidate skips the invalidate step, so we need to issue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) * separate operations. We also require the above debug workaround
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) * enclosing this code fragment on affected parts. On unaffected parts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) * we must not use this workaround without the debug register writes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) * to avoid exposing a problem similar to 727915.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) * 727915: PL310 R2P0->R3P0, fixed R3P1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) * Affects: clean+invalidate by way
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) * clean and invalidate by way runs in the background, and a store can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) * hit the line between the clean operation and invalidate operation,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) * resulting in the store being lost.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) * 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) * Affects: 8x64-bit (double fill) line fetches
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) * double fill line fetches can fail to cause dirty data to be evicted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) * from the cache before the new data overwrites the second line.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) * 753970: PL310 R3P0, fixed R3P1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) * Affects: sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) * prevents merging writes after the sync operation, until another L2C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) * operation is performed (or a number of other conditions.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) * 769419: PL310 R0P0->R3P1, fixed R3P2.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) * Affects: store buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) * store buffer is not automatically drained.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) void __iomem *base = l2x0_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) if ((start | end) & (CACHE_LINE_SIZE - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) /* Erratum 588369 for both clean+invalidate operations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) raw_spin_lock_irqsave(&l2x0_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) l2c_set_debug(base, 0x03);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) if (start & (CACHE_LINE_SIZE - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) start &= ~(CACHE_LINE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) writel_relaxed(start, base + L2X0_INV_LINE_PA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) start += CACHE_LINE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) if (end & (CACHE_LINE_SIZE - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) end &= ~(CACHE_LINE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) writel_relaxed(end, base + L2X0_INV_LINE_PA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) l2c_set_debug(base, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) raw_spin_unlock_irqrestore(&l2x0_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) __l2c210_cache_sync(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) raw_spinlock_t *lock = &l2x0_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) void __iomem *base = l2x0_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) raw_spin_lock_irqsave(lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) while (start < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) unsigned long blk_end = start + min(end - start, 4096UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) l2c_set_debug(base, 0x03);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) while (start < blk_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) writel_relaxed(start, base + L2X0_INV_LINE_PA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) start += CACHE_LINE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) l2c_set_debug(base, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) if (blk_end < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) raw_spin_unlock_irqrestore(lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) raw_spin_lock_irqsave(lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) raw_spin_unlock_irqrestore(lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) __l2c210_cache_sync(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) static void l2c310_flush_all_erratum(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) void __iomem *base = l2x0_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) raw_spin_lock_irqsave(&l2x0_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) l2c_set_debug(base, 0x03);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) __l2c_op_way(base + L2X0_CLEAN_INV_WAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) l2c_set_debug(base, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) __l2c210_cache_sync(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) raw_spin_unlock_irqrestore(&l2x0_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) static void __init l2c310_save(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) unsigned revision;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) l2c_save(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) l2x0_saved_regs.tag_latency = readl_relaxed(base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) L310_TAG_LATENCY_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) l2x0_saved_regs.data_latency = readl_relaxed(base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) L310_DATA_LATENCY_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) l2x0_saved_regs.filter_end = readl_relaxed(base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) L310_ADDR_FILTER_END);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) l2x0_saved_regs.filter_start = readl_relaxed(base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) L310_ADDR_FILTER_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) revision = readl_relaxed(base + L2X0_CACHE_ID) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) L2X0_CACHE_ID_RTL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) /* From r2p0, there is Prefetch offset/control register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) if (revision >= L310_CACHE_ID_RTL_R2P0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) L310_PREFETCH_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) /* From r3p0, there is Power control register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) if (revision >= L310_CACHE_ID_RTL_R3P0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) L310_POWER_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) static void l2c310_configure(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) unsigned revision;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) l2c_configure(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) /* restore pl310 setup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) l2c_write_sec(l2x0_saved_regs.tag_latency, base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) L310_TAG_LATENCY_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) l2c_write_sec(l2x0_saved_regs.data_latency, base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) L310_DATA_LATENCY_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) l2c_write_sec(l2x0_saved_regs.filter_end, base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) L310_ADDR_FILTER_END);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) l2c_write_sec(l2x0_saved_regs.filter_start, base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) L310_ADDR_FILTER_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) revision = readl_relaxed(base + L2X0_CACHE_ID) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) L2X0_CACHE_ID_RTL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) if (revision >= L310_CACHE_ID_RTL_R2P0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) L310_PREFETCH_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) if (revision >= L310_CACHE_ID_RTL_R3P0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) L310_POWER_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) static int l2c310_starting_cpu(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) static int l2c310_dying_cpu(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) bool cortex_a9 = read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) u32 aux = l2x0_saved_regs.aux_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) if (rev >= L310_CACHE_ID_RTL_R2P0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) if (cortex_a9 && !l2x0_bresp_disable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) aux |= L310_AUX_CTRL_EARLY_BRESP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) pr_info("L2C-310 enabling early BRESP for Cortex-A9\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) } else if (aux & L310_AUX_CTRL_EARLY_BRESP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) pr_warn("L2C-310 early BRESP only supported with Cortex-A9\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) aux &= ~L310_AUX_CTRL_EARLY_BRESP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) if (cortex_a9 && !l2x0_flz_disable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) u32 aux_cur = readl_relaxed(base + L2X0_AUX_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) u32 acr = get_auxcr();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) pr_debug("Cortex-A9 ACR=0x%08x\n", acr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) if (acr & BIT(3) && !(aux_cur & L310_AUX_CTRL_FULL_LINE_ZERO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) pr_err("L2C-310: full line of zeros enabled in Cortex-A9 but not L2C-310 - invalid\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) if (aux & L310_AUX_CTRL_FULL_LINE_ZERO && !(acr & BIT(3)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) pr_err("L2C-310: enabling full line of zeros but not enabled in Cortex-A9\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) if (!(aux & L310_AUX_CTRL_FULL_LINE_ZERO) && !outer_cache.write_sec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) aux |= L310_AUX_CTRL_FULL_LINE_ZERO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) pr_info("L2C-310 full line of zeros enabled for Cortex-A9\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) } else if (aux & (L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) pr_err("L2C-310: disabling Cortex-A9 specific feature bits\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) * Always enable non-secure access to the lockdown registers -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) * we write to them as part of the L2C enable sequence so they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) * need to be accessible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) l2x0_saved_regs.aux_ctrl = aux | L310_AUX_CTRL_NS_LOCKDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) l2c_enable(base, num_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) /* Read back resulting AUX_CTRL value as it could have been altered. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) aux = readl_relaxed(base + L2X0_AUX_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) if (aux & (L310_AUX_CTRL_DATA_PREFETCH | L310_AUX_CTRL_INSTR_PREFETCH)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) u32 prefetch = readl_relaxed(base + L310_PREFETCH_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) pr_info("L2C-310 %s%s prefetch enabled, offset %u lines\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) aux & L310_AUX_CTRL_INSTR_PREFETCH ? "I" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) aux & L310_AUX_CTRL_DATA_PREFETCH ? "D" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 1 + (prefetch & L310_PREFETCH_CTRL_OFFSET_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) /* r3p0 or later has power control register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) if (rev >= L310_CACHE_ID_RTL_R3P0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) u32 power_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) power_ctrl = readl_relaxed(base + L310_POWER_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) if (aux & L310_AUX_CTRL_FULL_LINE_ZERO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) cpuhp_setup_state(CPUHP_AP_ARM_L2X0_STARTING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) "arm/l2x0:starting", l2c310_starting_cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) l2c310_dying_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) struct outer_cache_fns *fns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) const char *errata[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) unsigned n = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) revision < L310_CACHE_ID_RTL_R2P0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) /* For bcm compatibility */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) fns->inv_range == l2c210_inv_range) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) fns->inv_range = l2c310_inv_range_erratum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) fns->flush_range = l2c310_flush_range_erratum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) errata[n++] = "588369";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) revision >= L310_CACHE_ID_RTL_R2P0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) revision < L310_CACHE_ID_RTL_R3P1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) fns->flush_all = l2c310_flush_all_erratum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) errata[n++] = "727915";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) if (revision >= L310_CACHE_ID_RTL_R3P0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) revision < L310_CACHE_ID_RTL_R3P2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) u32 val = l2x0_saved_regs.prefetch_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) if (val & L310_PREFETCH_CTRL_DBL_LINEFILL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) val &= ~L310_PREFETCH_CTRL_DBL_LINEFILL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) l2x0_saved_regs.prefetch_ctrl = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) errata[n++] = "752271";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) revision == L310_CACHE_ID_RTL_R3P0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) sync_reg_offset = L2X0_DUMMY_REG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) errata[n++] = "753970";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) errata[n++] = "769419";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) if (n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) for (i = 0; i < n; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) pr_cont(" %s", errata[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) pr_cont(" enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) static void l2c310_disable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) * If full-line-of-zeros is enabled, we must first disable it in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) * Cortex-A9 auxiliary control register before disabling the L2 cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) l2c_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) static void l2c310_resume(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) l2c_resume();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) /* Re-enable full-line-of-zeros for Cortex-A9 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) static void l2c310_unlock(void __iomem *base, unsigned num_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) if (readl_relaxed(base + L2X0_AUX_CTRL) & L310_AUX_CTRL_NS_LOCKDOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) l2c_unlock(base, num_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) static const struct l2c_init_data l2c310_init_fns __initconst = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) .type = "L2C-310",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) .way_size_0 = SZ_8K,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) .num_lock = 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) .enable = l2c310_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) .fixup = l2c310_fixup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) .save = l2c310_save,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) .configure = l2c310_configure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) .unlock = l2c310_unlock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) .outer_cache = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) .inv_range = l2c210_inv_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) .clean_range = l2c210_clean_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) .flush_range = l2c210_flush_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) .flush_all = l2c210_flush_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) .disable = l2c310_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) .sync = l2c210_sync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) .resume = l2c310_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) static int __init __l2c_init(const struct l2c_init_data *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) u32 aux_val, u32 aux_mask, u32 cache_id, bool nosync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) struct outer_cache_fns fns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) unsigned way_size_bits, ways;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) u32 aux, old_aux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) * Save the pointer globally so that callbacks which do not receive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) * context from callers can access the structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) l2x0_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) if (!l2x0_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) * Sanity check the aux values. aux_mask is the bits we preserve
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) * from reading the hardware register, and aux_val is the bits we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) * set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) if (aux_val & aux_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) pr_alert("L2C: platform provided aux values permit register corruption.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) old_aux = aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) aux &= aux_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) aux |= aux_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) if (old_aux != aux)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) pr_warn("L2C: DT/platform modifies aux control register: 0x%08x -> 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) old_aux, aux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) /* Determine the number of ways */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) case L2X0_CACHE_ID_PART_L310:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if ((aux_val | ~aux_mask) & (L2C_AUX_CTRL_WAY_SIZE_MASK | L310_AUX_CTRL_ASSOCIATIVITY_16))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) pr_warn("L2C: DT/platform tries to modify or specify cache size\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (aux & (1 << 16))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) ways = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) ways = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) case L2X0_CACHE_ID_PART_L210:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) case L2X0_CACHE_ID_PART_L220:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) ways = (aux >> 13) & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) case AURORA_CACHE_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) ways = (aux >> 13) & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) ways = 2 << ((ways + 1) >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) /* Assume unknown chips have 8 ways */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) ways = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) l2x0_way_mask = (1 << ways) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) * way_size_0 is the size that a way_size value of zero would be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) * given the calculation: way_size = way_size_0 << way_size_bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) * then way_size_0 would be 8k.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) * L2 cache size = number of ways * way size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) L2C_AUX_CTRL_WAY_SIZE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) l2x0_size = ways * (data->way_size_0 << way_size_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) fns = data->outer_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) fns.write_sec = outer_cache.write_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) fns.configure = outer_cache.configure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) if (data->fixup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) data->fixup(l2x0_base, cache_id, &fns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (nosync) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) pr_info("L2C: disabling outer sync\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) fns.sync = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) * Check if l2x0 controller is already enabled. If we are booting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) * in non-secure mode accessing the below registers will fault.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) l2x0_saved_regs.aux_ctrl = aux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) data->enable(l2x0_base, data->num_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) outer_cache = fns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * It is strange to save the register state before initialisation,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * but hey, this is what the DT implementations decided to do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (data->save)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) data->save(l2x0_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) /* Re-read it in case some bits are reserved. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) pr_info("%s cache controller enabled, %d ways, %d kB\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) data->type, ways, l2x0_size >> 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) data->type, cache_id, aux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) l2x0_pmu_register(l2x0_base, cache_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) const struct l2c_init_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) u32 cache_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) l2x0_base = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) cache_id = readl_relaxed(base + L2X0_CACHE_ID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) case L2X0_CACHE_ID_PART_L210:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) data = &l2c210_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) case L2X0_CACHE_ID_PART_L220:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) data = &l2c220_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) case L2X0_CACHE_ID_PART_L310:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) data = &l2c310_init_fns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) /* Read back current (default) hardware configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) if (data->save)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) data->save(l2x0_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) __l2c_init(data, aux_val, aux_mask, cache_id, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) #ifdef CONFIG_OF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) static int l2_wt_override;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) /* Aurora don't have the cache ID register available, so we have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) * pass it though the device tree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) static u32 cache_id_part_number_from_dt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) * l2x0_cache_size_of_parse() - read cache size parameters from DT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) * @np: the device tree node for the l2 cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * @aux_val: pointer to machine-supplied auxilary register value, to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * be augmented by the call (bits to be set to 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * @aux_mask: pointer to machine-supplied auxilary register mask, to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) * be augmented by the call (bits to be set to 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) * @associativity: variable to return the calculated associativity in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * @max_way_size: the maximum size in bytes for the cache ways
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) static int __init l2x0_cache_size_of_parse(const struct device_node *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) u32 *aux_val, u32 *aux_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) u32 *associativity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) u32 max_way_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) u32 mask = 0, val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) u32 cache_size = 0, sets = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) u32 way_size_bits = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) u32 way_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) u32 block_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) u32 line_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) of_property_read_u32(np, "cache-size", &cache_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) of_property_read_u32(np, "cache-sets", &sets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) of_property_read_u32(np, "cache-block-size", &block_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) of_property_read_u32(np, "cache-line-size", &line_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (!cache_size || !sets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) /* All these l2 caches have the same line = block size actually */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) if (!line_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) if (block_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) /* If linesize is not given, it is equal to blocksize */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) line_size = block_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) /* Fall back to known size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) pr_warn("L2C OF: no cache block/line size given: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) "falling back to default size %d bytes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) CACHE_LINE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) line_size = CACHE_LINE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (line_size != CACHE_LINE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) pr_warn("L2C OF: DT supplied line size %d bytes does "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) "not match hardware line size of %d bytes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) line_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) CACHE_LINE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) * Since:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * set size = cache size / sets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) * ways = cache size / (sets * line size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) * way size = cache size / (cache size / (sets * line size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) * way size = sets * line size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) * associativity = ways = cache size / way size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) way_size = sets * line_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) *associativity = cache_size / way_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (way_size > max_way_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) pr_err("L2C OF: set size %dKB is too large\n", way_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) pr_info("L2C OF: override cache size: %d bytes (%dKB)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) cache_size, cache_size >> 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) pr_info("L2C OF: override line size: %d bytes\n", line_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) pr_info("L2C OF: override way size: %d bytes (%dKB)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) way_size, way_size >> 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) pr_info("L2C OF: override associativity: %d\n", *associativity);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) * Calculates the bits 17:19 to set for way size:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) * 512KB -> 6, 256KB -> 5, ... 16KB -> 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) way_size_bits = ilog2(way_size >> 10) - 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (way_size_bits < 1 || way_size_bits > 6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) pr_err("L2C OF: cache way size illegal: %dKB is not mapped\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) way_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) mask |= L2C_AUX_CTRL_WAY_SIZE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) val |= (way_size_bits << L2C_AUX_CTRL_WAY_SIZE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) *aux_val &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) *aux_val |= val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) *aux_mask &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) static void __init l2x0_of_parse(const struct device_node *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) u32 *aux_val, u32 *aux_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) u32 data[2] = { 0, 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) u32 tag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) u32 dirty = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) u32 val = 0, mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) u32 assoc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) of_property_read_u32(np, "arm,tag-latency", &tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) if (tag) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) of_property_read_u32_array(np, "arm,data-latency",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) data, ARRAY_SIZE(data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (data[0] && data[1]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) of_property_read_u32(np, "arm,dirty-latency", &dirty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) if (dirty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) if (of_property_read_bool(np, "arm,parity-enable")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) val |= L2C_AUX_CTRL_PARITY_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) } else if (of_property_read_bool(np, "arm,parity-disable")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) if (of_property_read_bool(np, "arm,shared-override")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) if (assoc > 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) pr_err("l2x0 of: cache setting yield too high associativity\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) pr_err("l2x0 of: %d calculated, max 8\n", assoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) mask |= L2X0_AUX_CTRL_ASSOC_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) val |= (assoc << L2X0_AUX_CTRL_ASSOC_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) *aux_val &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) *aux_val |= val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) *aux_mask &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) static const struct l2c_init_data of_l2c210_data __initconst = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) .type = "L2C-210",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) .way_size_0 = SZ_8K,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) .num_lock = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) .of_parse = l2x0_of_parse,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) .enable = l2c_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) .save = l2c_save,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) .configure = l2c_configure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) .unlock = l2c_unlock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) .outer_cache = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) .inv_range = l2c210_inv_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) .clean_range = l2c210_clean_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) .flush_range = l2c210_flush_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) .flush_all = l2c210_flush_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) .disable = l2c_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) .sync = l2c210_sync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) .resume = l2c_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) static const struct l2c_init_data of_l2c220_data __initconst = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) .type = "L2C-220",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) .way_size_0 = SZ_8K,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) .num_lock = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) .of_parse = l2x0_of_parse,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) .enable = l2c220_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) .save = l2c_save,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) .configure = l2c_configure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) .unlock = l2c220_unlock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) .outer_cache = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) .inv_range = l2c220_inv_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) .clean_range = l2c220_clean_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) .flush_range = l2c220_flush_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) .flush_all = l2c220_flush_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) .disable = l2c_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) .sync = l2c220_sync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) .resume = l2c_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) static void __init l2c310_of_parse(const struct device_node *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) u32 *aux_val, u32 *aux_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) u32 data[3] = { 0, 0, 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) u32 tag[3] = { 0, 0, 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) u32 filter[2] = { 0, 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) u32 assoc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) u32 prefetch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) u32 power;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) if (tag[0] && tag[1] && tag[2])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) l2x0_saved_regs.tag_latency =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) L310_LATENCY_CTRL_RD(tag[0] - 1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) L310_LATENCY_CTRL_WR(tag[1] - 1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) L310_LATENCY_CTRL_SETUP(tag[2] - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) of_property_read_u32_array(np, "arm,data-latency",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) data, ARRAY_SIZE(data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) if (data[0] && data[1] && data[2])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) l2x0_saved_regs.data_latency =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) L310_LATENCY_CTRL_RD(data[0] - 1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) L310_LATENCY_CTRL_WR(data[1] - 1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) L310_LATENCY_CTRL_SETUP(data[2] - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) of_property_read_u32_array(np, "arm,filter-ranges",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) filter, ARRAY_SIZE(filter));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) if (filter[1]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) l2x0_saved_regs.filter_end =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) ALIGN(filter[0] + filter[1], SZ_1M);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) l2x0_saved_regs.filter_start = (filter[0] & ~(SZ_1M - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) | L310_ADDR_FILTER_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) switch (assoc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) case 16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) *aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) assoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) if (of_property_read_bool(np, "arm,shared-override")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) *aux_val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) *aux_mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) if (of_property_read_bool(np, "arm,parity-enable")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) *aux_val |= L2C_AUX_CTRL_PARITY_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) *aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) } else if (of_property_read_bool(np, "arm,parity-disable")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) *aux_val &= ~L2C_AUX_CTRL_PARITY_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) *aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) if (of_property_read_bool(np, "arm,early-bresp-disable"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) l2x0_bresp_disable = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) if (of_property_read_bool(np, "arm,full-line-zero-disable"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) l2x0_flz_disable = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) prefetch = l2x0_saved_regs.prefetch_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) ret = of_property_read_u32(np, "arm,double-linefill", &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) if (val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) } else if (ret != -EINVAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) pr_err("L2C-310 OF arm,double-linefill property value is missing\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) ret = of_property_read_u32(np, "arm,double-linefill-incr", &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) if (val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) } else if (ret != -EINVAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) pr_err("L2C-310 OF arm,double-linefill-incr property value is missing\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) ret = of_property_read_u32(np, "arm,double-linefill-wrap", &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) if (!val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) } else if (ret != -EINVAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) pr_err("L2C-310 OF arm,double-linefill-wrap property value is missing\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) ret = of_property_read_u32(np, "arm,prefetch-drop", &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) if (val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) prefetch |= L310_PREFETCH_CTRL_PREFETCH_DROP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) prefetch &= ~L310_PREFETCH_CTRL_PREFETCH_DROP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) } else if (ret != -EINVAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) pr_err("L2C-310 OF arm,prefetch-drop property value is missing\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) ret = of_property_read_u32(np, "arm,prefetch-offset", &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) prefetch &= ~L310_PREFETCH_CTRL_OFFSET_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) prefetch |= val & L310_PREFETCH_CTRL_OFFSET_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) } else if (ret != -EINVAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) pr_err("L2C-310 OF arm,prefetch-offset property value is missing\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) ret = of_property_read_u32(np, "prefetch-data", &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) if (val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) *aux_val |= L310_PREFETCH_CTRL_DATA_PREFETCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) *aux_val &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) *aux_mask &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) } else if (ret != -EINVAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) pr_err("L2C-310 OF prefetch-data property value is missing\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) ret = of_property_read_u32(np, "prefetch-instr", &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) if (val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) *aux_val |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) *aux_val &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) *aux_mask &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) } else if (ret != -EINVAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) pr_err("L2C-310 OF prefetch-instr property value is missing\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) l2x0_saved_regs.prefetch_ctrl = prefetch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) power = l2x0_saved_regs.pwr_ctrl |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) ret = of_property_read_u32(np, "arm,dynamic-clock-gating", &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) if (!val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) power &= ~L310_DYNAMIC_CLK_GATING_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) } else if (ret != -EINVAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) pr_err("L2C-310 OF dynamic-clock-gating property value is missing or invalid\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) ret = of_property_read_u32(np, "arm,standby-mode", &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) if (!val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) power &= ~L310_STNDBY_MODE_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) } else if (ret != -EINVAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) pr_err("L2C-310 OF standby-mode property value is missing or invalid\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) l2x0_saved_regs.pwr_ctrl = power;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) static const struct l2c_init_data of_l2c310_data __initconst = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) .type = "L2C-310",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) .way_size_0 = SZ_8K,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) .num_lock = 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) .of_parse = l2c310_of_parse,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) .enable = l2c310_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) .fixup = l2c310_fixup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) .save = l2c310_save,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) .configure = l2c310_configure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) .unlock = l2c310_unlock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) .outer_cache = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) .inv_range = l2c210_inv_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) .clean_range = l2c210_clean_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) .flush_range = l2c210_flush_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) .flush_all = l2c210_flush_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) .disable = l2c310_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) .sync = l2c210_sync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) .resume = l2c310_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) * This is a variant of the of_l2c310_data with .sync set to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) * NULL. Outer sync operations are not needed when the system is I/O
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) * coherent, and potentially harmful in certain situations (PCIe/PL310
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) * deadlock on Armada 375/38x due to hardware I/O coherency). The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) * other operations are kept because they are infrequent (therefore do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) * not cause the deadlock in practice) and needed for secondary CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) * boot and other power management activities.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) static const struct l2c_init_data of_l2c310_coherent_data __initconst = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) .type = "L2C-310 Coherent",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) .way_size_0 = SZ_8K,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) .num_lock = 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) .of_parse = l2c310_of_parse,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) .enable = l2c310_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) .fixup = l2c310_fixup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) .save = l2c310_save,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) .configure = l2c310_configure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) .unlock = l2c310_unlock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) .outer_cache = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) .inv_range = l2c210_inv_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) .clean_range = l2c210_clean_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) .flush_range = l2c210_flush_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) .flush_all = l2c210_flush_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) .disable = l2c310_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) .resume = l2c310_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) * Note that the end addresses passed to Linux primitives are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) * noninclusive, while the hardware cache range operations use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) * inclusive start and end addresses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) static unsigned long aurora_range_end(unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) * Limit the number of cache lines processed at once,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) * since cache range operations stall the CPU pipeline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) * until completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) if (end > start + AURORA_MAX_RANGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) end = start + AURORA_MAX_RANGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) * Cache range operations can't straddle a page boundary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) if (end > PAGE_ALIGN(start+1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) end = PAGE_ALIGN(start+1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) return end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) static void aurora_pa_range(unsigned long start, unsigned long end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) unsigned long offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) void __iomem *base = l2x0_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) unsigned long range_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) * round start and end adresses up to cache line size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) start &= ~(CACHE_LINE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) end = ALIGN(end, CACHE_LINE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) * perform operation on all full cache lines between 'start' and 'end'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) while (start < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) range_end = aurora_range_end(start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) raw_spin_lock_irqsave(&l2x0_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) writel_relaxed(start, base + AURORA_RANGE_BASE_ADDR_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) writel_relaxed(range_end - CACHE_LINE_SIZE, base + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) raw_spin_unlock_irqrestore(&l2x0_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) writel_relaxed(0, base + AURORA_SYNC_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) start = range_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) static void aurora_inv_range(unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) static void aurora_clean_range(unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) * If L2 is forced to WT, the L2 will always be clean and we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) * don't need to do anything here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) if (!l2_wt_override)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) aurora_pa_range(start, end, AURORA_CLEAN_RANGE_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) static void aurora_flush_range(unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) if (l2_wt_override)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) aurora_pa_range(start, end, AURORA_FLUSH_RANGE_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) static void aurora_flush_all(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) void __iomem *base = l2x0_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) /* clean all ways */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) raw_spin_lock_irqsave(&l2x0_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) __l2c_op_way(base + L2X0_CLEAN_INV_WAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) raw_spin_unlock_irqrestore(&l2x0_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) writel_relaxed(0, base + AURORA_SYNC_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) static void aurora_cache_sync(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) writel_relaxed(0, l2x0_base + AURORA_SYNC_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) static void aurora_disable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) void __iomem *base = l2x0_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) raw_spin_lock_irqsave(&l2x0_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) __l2c_op_way(base + L2X0_CLEAN_INV_WAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) writel_relaxed(0, base + AURORA_SYNC_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) l2c_write_sec(0, base, L2X0_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) dsb(st);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) raw_spin_unlock_irqrestore(&l2x0_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) static void aurora_save(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) * For Aurora cache in no outer mode, enable via the CP15 coprocessor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) * broadcasting of cache commands to L2.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) static void __init aurora_enable_no_outer(void __iomem *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) unsigned num_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) u32 u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) u |= AURORA_CTRL_FW; /* Set the FW bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) isb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) l2c_enable(base, num_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) static void __init aurora_fixup(void __iomem *base, u32 cache_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) struct outer_cache_fns *fns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) sync_reg_offset = AURORA_SYNC_REG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) static void __init aurora_of_parse(const struct device_node *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) u32 *aux_val, u32 *aux_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) u32 mask = AURORA_ACR_REPLACEMENT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) of_property_read_u32(np, "cache-id-part",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) &cache_id_part_number_from_dt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) /* Determine and save the write policy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) l2_wt_override = of_property_read_bool(np, "wt-override");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) if (l2_wt_override) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) if (of_property_read_bool(np, "marvell,ecc-enable")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) mask |= AURORA_ACR_ECC_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) val |= AURORA_ACR_ECC_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) if (of_property_read_bool(np, "arm,parity-enable")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) mask |= AURORA_ACR_PARITY_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) val |= AURORA_ACR_PARITY_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) } else if (of_property_read_bool(np, "arm,parity-disable")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) mask |= AURORA_ACR_PARITY_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) *aux_val &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) *aux_val |= val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) *aux_mask &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) .type = "Aurora",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) .way_size_0 = SZ_4K,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) .num_lock = 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) .of_parse = aurora_of_parse,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) .enable = l2c_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) .fixup = aurora_fixup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) .save = aurora_save,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) .configure = l2c_configure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) .unlock = l2c_unlock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) .outer_cache = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) .inv_range = aurora_inv_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) .clean_range = aurora_clean_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) .flush_range = aurora_flush_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) .flush_all = aurora_flush_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) .disable = aurora_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) .sync = aurora_cache_sync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) .resume = l2c_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) .type = "Aurora",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) .way_size_0 = SZ_4K,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) .num_lock = 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) .of_parse = aurora_of_parse,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) .enable = aurora_enable_no_outer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) .fixup = aurora_fixup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) .save = aurora_save,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) .configure = l2c_configure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) .unlock = l2c_unlock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) .outer_cache = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) .resume = l2c_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) * For certain Broadcom SoCs, depending on the address range, different offsets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) * need to be added to the address before passing it to L2 for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) * invalidation/clean/flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) * Section Address Range Offset EMI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) * 1 0x00000000 - 0x3FFFFFFF 0x80000000 VC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) * 2 0x40000000 - 0xBFFFFFFF 0x40000000 SYS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) * 3 0xC0000000 - 0xFFFFFFFF 0x80000000 VC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) * When the start and end addresses have crossed two different sections, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) * need to break the L2 operation into two, each within its own section.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) * For example, if we need to invalidate addresses starts at 0xBFFF0000 and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) * ends at 0xC0001000, we need do invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) * 0xC0000000 - 0xC0001000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) * Note 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) * By breaking a single L2 operation into two, we may potentially suffer some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) * performance hit, but keep in mind the cross section case is very rare
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) * Note 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) * We do not need to handle the case when the start address is in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) * Section 1 and the end address is in Section 3, since it is not a valid use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) * case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) * Note 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) * Section 1 in practical terms can no longer be used on rev A2. Because of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) * that the code does not need to handle section 1 at all.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) #define BCM_SYS_EMI_START_ADDR 0x40000000UL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) #define BCM_VC_EMI_SEC3_START_ADDR 0xC0000000UL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) #define BCM_SYS_EMI_OFFSET 0x40000000UL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) #define BCM_VC_EMI_OFFSET 0x80000000UL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) static inline int bcm_addr_is_sys_emi(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) return (addr >= BCM_SYS_EMI_START_ADDR) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) (addr < BCM_VC_EMI_SEC3_START_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) if (bcm_addr_is_sys_emi(addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) return addr + BCM_SYS_EMI_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) return addr + BCM_VC_EMI_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) static void bcm_inv_range(unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) unsigned long new_start, new_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) BUG_ON(start < BCM_SYS_EMI_START_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) if (unlikely(end <= start))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) new_start = bcm_l2_phys_addr(start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) new_end = bcm_l2_phys_addr(end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) /* normal case, no cross section between start and end */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) l2c210_inv_range(new_start, new_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) /* They cross sections, so it can only be a cross from section
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) * 2 to section 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) l2c210_inv_range(new_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) new_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) static void bcm_clean_range(unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) unsigned long new_start, new_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) BUG_ON(start < BCM_SYS_EMI_START_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) if (unlikely(end <= start))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) new_start = bcm_l2_phys_addr(start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) new_end = bcm_l2_phys_addr(end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) /* normal case, no cross section between start and end */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) l2c210_clean_range(new_start, new_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) /* They cross sections, so it can only be a cross from section
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) * 2 to section 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) l2c210_clean_range(new_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) new_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) static void bcm_flush_range(unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) unsigned long new_start, new_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) BUG_ON(start < BCM_SYS_EMI_START_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) if (unlikely(end <= start))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) if ((end - start) >= l2x0_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) outer_cache.flush_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) new_start = bcm_l2_phys_addr(start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) new_end = bcm_l2_phys_addr(end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) /* normal case, no cross section between start and end */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) l2c210_flush_range(new_start, new_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) /* They cross sections, so it can only be a cross from section
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) * 2 to section 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) l2c210_flush_range(new_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) new_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) /* Broadcom L2C-310 start from ARMs R3P2 or later, and require no fixups */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) .type = "BCM-L2C-310",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) .way_size_0 = SZ_8K,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) .num_lock = 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) .of_parse = l2c310_of_parse,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) .enable = l2c310_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) .save = l2c310_save,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) .configure = l2c310_configure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) .unlock = l2c310_unlock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) .outer_cache = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) .inv_range = bcm_inv_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) .clean_range = bcm_clean_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) .flush_range = bcm_flush_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) .flush_all = l2c210_flush_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) .disable = l2c310_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) .sync = l2c210_sync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) .resume = l2c310_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) static void __init tauros3_save(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) l2c_save(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) l2x0_saved_regs.aux2_ctrl =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) readl_relaxed(base + TAUROS3_AUX2_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) l2x0_saved_regs.prefetch_ctrl =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) readl_relaxed(base + L310_PREFETCH_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) static void tauros3_configure(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) l2c_configure(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) writel_relaxed(l2x0_saved_regs.aux2_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) base + TAUROS3_AUX2_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) base + L310_PREFETCH_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) static const struct l2c_init_data of_tauros3_data __initconst = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) .type = "Tauros3",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) .way_size_0 = SZ_8K,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) .num_lock = 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) .enable = l2c_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) .save = tauros3_save,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) .configure = tauros3_configure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) .unlock = l2c_unlock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) /* Tauros3 broadcasts L1 cache operations to L2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) .outer_cache = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) .resume = l2c_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) #define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) static const struct of_device_id l2x0_ids[] __initconst = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) L2C_ID("arm,l210-cache", of_l2c210_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) L2C_ID("arm,l220-cache", of_l2c220_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) L2C_ID("arm,pl310-cache", of_l2c310_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) L2C_ID("marvell,tauros3-cache", of_tauros3_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) /* Deprecated IDs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) const struct l2c_init_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) struct device_node *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) struct resource res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) u32 cache_id, old_aux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) u32 cache_level = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) bool nosync = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) np = of_find_matching_node(NULL, l2x0_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) if (!np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) if (of_address_to_resource(np, 0, &res))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) l2x0_base = ioremap(res.start, resource_size(&res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) if (!l2x0_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) l2x0_saved_regs.phy_base = res.start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) data = of_match_node(l2x0_ids, np)->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) if (of_device_is_compatible(np, "arm,pl310-cache") &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) of_property_read_bool(np, "arm,io-coherent"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) data = &of_l2c310_coherent_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) old_aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) if (old_aux != ((old_aux & aux_mask) | aux_val)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) pr_warn("L2C: platform modifies aux control register: 0x%08x -> 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) old_aux, (old_aux & aux_mask) | aux_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) } else if (aux_mask != ~0U && aux_val != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) pr_alert("L2C: platform provided aux values match the hardware, so have no effect. Please remove them.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) /* All L2 caches are unified, so this property should be specified */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) if (!of_property_read_bool(np, "cache-unified"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) pr_err("L2C: device tree omits to specify unified cache\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) if (of_property_read_u32(np, "cache-level", &cache_level))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) pr_err("L2C: device tree omits to specify cache-level\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) if (cache_level != 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) pr_err("L2C: device tree specifies invalid cache level\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) nosync = of_property_read_bool(np, "arm,outer-sync-disable");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) /* Read back current (default) hardware configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) if (data->save)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) data->save(l2x0_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) /* L2 configuration can only be changed if the cache is disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) if (data->of_parse)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) data->of_parse(np, &aux_val, &aux_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) if (cache_id_part_number_from_dt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) cache_id = cache_id_part_number_from_dt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) return __l2c_init(data, aux_val, aux_mask, cache_id, nosync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) #endif