Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Broadcom Brahma-B15 CPU read-ahead cache management functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright (C) 2015-2016 Broadcom
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/of_address.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/notifier.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/syscore_ops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/reboot.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <asm/cacheflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <asm/hardware/cache-b15-rac.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) extern void v7_flush_kern_cache_all(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) /* RAC register offsets, relative to the HIF_CPU_BIUCTRL register base */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #define RAC_CONFIG0_REG			(0x78)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #define  RACENPREF_MASK			(0x3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) #define  RACPREFINST_SHIFT		(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) #define  RACENINST_SHIFT		(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) #define  RACPREFDATA_SHIFT		(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) #define  RACENDATA_SHIFT		(6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) #define  RAC_CPU_SHIFT			(8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) #define  RACCFG_MASK			(0xff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) #define RAC_CONFIG1_REG			(0x7c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) /* Brahma-B15 is a quad-core only design */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) #define B15_RAC_FLUSH_REG		(0x80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) /* Brahma-B53 is an octo-core design */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) #define B53_RAC_FLUSH_REG		(0x84)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) #define  FLUSH_RAC			(1 << 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) /* Bitmask to enable instruction and data prefetching with a 256-bytes stride */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) #define RAC_DATA_INST_EN_MASK		(1 << RACPREFINST_SHIFT | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 					 RACENPREF_MASK << RACENINST_SHIFT | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 					 1 << RACPREFDATA_SHIFT | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 					 RACENPREF_MASK << RACENDATA_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) #define RAC_ENABLED			0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) /* Special state where we want to bypass the spinlock and call directly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47)  * into the v7 cache maintenance operations during suspend/resume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) #define RAC_SUSPENDED			1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) static void __iomem *b15_rac_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) static DEFINE_SPINLOCK(rac_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) static u32 rac_config0_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) static u32 rac_flush_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) /* Initialization flag to avoid checking for b15_rac_base, and to prevent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58)  * multi-platform kernels from crashing here as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) static unsigned long b15_rac_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 
/*
 * __b15_rac_disable() - turn the read-ahead cache off
 *
 * Saves the current RAC_CONFIG0_REG contents so the caller can restore them
 * later, then writes 0 to disable the RAC.  The dmb() orders the disabling
 * write against subsequent memory accesses.
 *
 * Return: the RAC_CONFIG0_REG value that was in effect before disabling
 */
static inline u32 __b15_rac_disable(void)
{
	u32 val = __raw_readl(b15_rac_base + RAC_CONFIG0_REG);
	__raw_writel(0, b15_rac_base + RAC_CONFIG0_REG);
	dmb();
	return val;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 
/*
 * __b15_rac_flush() - flush the read-ahead cache and wait for completion
 *
 * Kicks a flush by writing FLUSH_RAC to the CPU-type-specific flush register
 * (rac_flush_offset), then polls until the hardware clears the bit,
 * indicating the flush has completed.
 */
static inline void __b15_rac_flush(void)
{
	u32 reg;

	__raw_writel(FLUSH_RAC, b15_rac_base + rac_flush_offset);
	do {
		/* This dmb() is required to force the Bus Interface Unit
		 * to clean outstanding writes, and forces an idle cycle
		 * to be inserted.
		 */
		dmb();
		reg = __raw_readl(b15_rac_base + rac_flush_offset);
	} while (reg & FLUSH_RAC);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) static inline u32 b15_rac_disable_and_flush(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 	u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 	reg = __b15_rac_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 	__b15_rac_flush();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 	return reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 
/*
 * __b15_rac_enable() - write a new RAC configuration, enabling it
 * @val: value to program into RAC_CONFIG0_REG
 */
static inline void __b15_rac_enable(u32 val)
{
	__raw_writel(val, b15_rac_base + RAC_CONFIG0_REG);
	/* dsb() is required here to be consistent with __flush_icache_all() */
	dsb();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 
/*
 * BUILD_RAC_CACHE_OP() - emit a RAC-aware cache maintenance function
 *
 * Expands to a b15_flush_<name>() function which, when the RAC is enabled,
 * disables and flushes the read-ahead cache around the corresponding
 * v7_flush_<name>() operation, then restores the previous RAC configuration.
 * @bar is an optional trailing barrier statement (pass "nobarrier" for
 * none); it only runs on the paths where __b15_rac_enable(), which already
 * issues a dsb(), was not called.
 *
 * When RAC_SUSPENDED is set we bypass the spinlock and the RAC registers
 * entirely and call straight into the v7 operation.
 */
#define BUILD_RAC_CACHE_OP(name, bar)				\
void b15_flush_##name(void)					\
{								\
	unsigned int do_flush;					\
	u32 val = 0;						\
								\
	if (test_bit(RAC_SUSPENDED, &b15_rac_flags)) {		\
		v7_flush_##name();				\
		bar;						\
		return;						\
	}							\
								\
	spin_lock(&rac_lock);					\
	do_flush = test_bit(RAC_ENABLED, &b15_rac_flags);	\
	if (do_flush)						\
		val = b15_rac_disable_and_flush();		\
	v7_flush_##name();					\
	if (!do_flush)						\
		bar;						\
	else							\
		__b15_rac_enable(val);				\
	spin_unlock(&rac_lock);					\
}

/* Expands to nothing: used when no extra trailing barrier is wanted */
#define nobarrier

/* The readahead cache present in the Brahma-B15 CPU is a special piece of
 * hardware after the integrated L2 cache of the B15 CPU complex whose purpose
 * is to prefetch instruction and/or data with a line size of either 64 bytes
 * or 256 bytes. The rationale is that the data-bus of the CPU interface is
 * optimized for 256-bytes transactions, and enabling the readahead cache
 * provides a significant performance boost, so we want it enabled (typically
 * twice the performance for a memcpy benchmark application).
 *
 * The readahead cache is transparent for Modified Virtual Addresses
 * cache maintenance operations: ICIMVAU, DCIMVAC, DCCMVAC, DCCMVAU and
 * DCCIMVAC.
 *
 * It is however not transparent for the following cache maintenance
 * operations: DCISW, DCCSW, DCCISW, ICIALLUIS and ICIALLU which is precisely
 * what we are patching here with our BUILD_RAC_CACHE_OP.
 */
BUILD_RAC_CACHE_OP(kern_cache_all, nobarrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) static void b15_rac_enable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 	unsigned int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 	u32 enable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 	for_each_possible_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 		enable |= (RAC_DATA_INST_EN_MASK << (cpu * RAC_CPU_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 	b15_rac_disable_and_flush();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 	__b15_rac_enable(enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) static int b15_rac_reboot_notifier(struct notifier_block *nb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 				   unsigned long action,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 				   void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 	/* During kexec, we are not yet migrated on the boot CPU, so we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	 * make sure we are SMP safe here. Once the RAC is disabled, flag it as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 	 * suspended such that the hotplug notifier returns early.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 	if (action == SYS_RESTART) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 		spin_lock(&rac_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 		b15_rac_disable_and_flush();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 		clear_bit(RAC_ENABLED, &b15_rac_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 		set_bit(RAC_SUSPENDED, &b15_rac_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 		spin_unlock(&rac_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 	return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 
/* Notifier used to disable the RAC before a restart or kexec */
static struct notifier_block b15_rac_reboot_nb = {
	.notifier_call	= b15_rac_reboot_notifier,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 
/* The CPU hotplug case is the most interesting one, we basically need to make
 * sure that the RAC is disabled for the entire system prior to having a CPU
 * die, in particular prior to this dying CPU having exited the coherency
 * domain.
 *
 * Once this CPU is marked dead, we can safely re-enable the RAC for the
 * remaining CPUs in the system which are still online.
 *
 * Offlining a CPU is the problematic case, onlining a CPU is not much of an
 * issue since the CPU and its cache-level hierarchy will start filling with
 * the RAC disabled, so L1 and L2 only.
 *
 * In this function, we should NOT have to verify any unsafe setting/condition
 * b15_rac_base:
 *
 *   It is protected by the RAC_ENABLED flag which is cleared by default, and
 *   being cleared when initial procedure is done. b15_rac_base had been set at
 *   that time.
 *
 * RAC_ENABLED:
 *   There is a small timing window, in b15_rac_init(), between
 *      cpuhp_setup_state_*()
 *      ...
 *      set RAC_ENABLED
 *   However, there is no hotplug activity based on the Linux booting procedure.
 *
 * Since we have to disable RAC for all cores, we keep RAC on as long as
 * possible (disable it as late as possible) to gain the cache benefit.
 *
 * Thus, dying/dead states are chosen here.
 *
 * We are choosing not to disable the RAC on a per-CPU basis, here; if we did
 * we would want to consider disabling it as early as possible to benefit the
 * other active CPUs.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 
/* Running on the dying CPU */
/*
 * b15_rac_dying_cpu() - CPU hotplug "dying" callback
 * @cpu: the CPU going offline (unused)
 *
 * Disables and flushes the RAC for the whole system before the dying CPU
 * exits the coherency domain, saving the previous configuration in
 * rac_config0_reg so b15_rac_dead_cpu() can restore it afterwards.
 *
 * Return: always 0
 */
static int b15_rac_dying_cpu(unsigned int cpu)
{
	/* During kexec/reboot, the RAC is disabled via the reboot notifier;
	 * return early here.
	 */
	if (test_bit(RAC_SUSPENDED, &b15_rac_flags))
		return 0;

	spin_lock(&rac_lock);

	/* Indicate that we are starting a hotplug procedure */
	__clear_bit(RAC_ENABLED, &b15_rac_flags);

	/* Disable the readahead cache and save its value to a global */
	rac_config0_reg = b15_rac_disable_and_flush();

	spin_unlock(&rac_lock);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 
/* Running on a non-dying CPU */
/*
 * b15_rac_dead_cpu() - CPU hotplug "dead" callback
 * @cpu: the CPU that just went offline (unused)
 *
 * Re-enables the RAC for the remaining online CPUs, restoring the
 * configuration saved by b15_rac_dying_cpu() in rac_config0_reg.
 *
 * Return: always 0
 */
static int b15_rac_dead_cpu(unsigned int cpu)
{
	/* During kexec/reboot, the RAC is disabled via the reboot notifier;
	 * return early here.
	 */
	if (test_bit(RAC_SUSPENDED, &b15_rac_flags))
		return 0;

	spin_lock(&rac_lock);

	/* And enable it */
	__b15_rac_enable(rac_config0_reg);
	__set_bit(RAC_ENABLED, &b15_rac_flags);

	spin_unlock(&rac_lock);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 
/*
 * b15_rac_suspend() - syscore suspend hook
 *
 * Return: always 0
 */
static int b15_rac_suspend(void)
{
	/* Suspend the read-ahead cache operations, forcing our cache
	 * implementation to fallback to the regular ARMv7 calls.
	 *
	 * We are guaranteed to be running on the boot CPU at this point and
	 * with every other CPU quiesced, so setting RAC_SUSPENDED is not racy
	 * here.
	 */
	rac_config0_reg = b15_rac_disable_and_flush();
	set_bit(RAC_SUSPENDED, &b15_rac_flags);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 
/*
 * b15_rac_resume() - syscore resume hook
 */
static void b15_rac_resume(void)
{
	/* Coming out of a S3 suspend/resume cycle, the read-ahead cache
	 * register RAC_CONFIG0_REG will be restored to its default value, make
	 * sure we re-enable it and set the enable flag, we are also guaranteed
	 * to run on the boot CPU, so not racy again.
	 */
	__b15_rac_enable(rac_config0_reg);
	clear_bit(RAC_SUSPENDED, &b15_rac_flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 
/* System core operations disabling/re-enabling the RAC across S3 cycles */
static struct syscore_ops b15_rac_syscore_ops = {
	.suspend	= b15_rac_suspend,
	.resume		= b15_rac_resume,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) static int __init b15_rac_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 	struct device_node *dn, *cpu_dn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 	int ret = 0, cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 	u32 reg, en_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 	dn = of_find_compatible_node(NULL, NULL, "brcm,brcmstb-cpu-biu-ctrl");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 	if (!dn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 	if (WARN(num_possible_cpus() > 4, "RAC only supports 4 CPUs\n"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) 	b15_rac_base = of_iomap(dn, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 	if (!b15_rac_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 		pr_err("failed to remap BIU control base\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) 	cpu_dn = of_get_cpu_node(0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) 	if (!cpu_dn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) 		ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 	if (of_device_is_compatible(cpu_dn, "brcm,brahma-b15"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 		rac_flush_offset = B15_RAC_FLUSH_REG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) 	else if (of_device_is_compatible(cpu_dn, "brcm,brahma-b53"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 		rac_flush_offset = B53_RAC_FLUSH_REG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 		pr_err("Unsupported CPU\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) 		of_node_put(cpu_dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) 	of_node_put(cpu_dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) 	ret = register_reboot_notifier(&b15_rac_reboot_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 		pr_err("failed to register reboot notifier\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 		iounmap(b15_rac_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 	if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) 		ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 					"arm/cache-b15-rac:dead",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 					NULL, b15_rac_dead_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 			goto out_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 		ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DYING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 					"arm/cache-b15-rac:dying",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 					NULL, b15_rac_dying_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 			goto out_cpu_dead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 	if (IS_ENABLED(CONFIG_PM_SLEEP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) 		register_syscore_ops(&b15_rac_syscore_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) 	spin_lock(&rac_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 	reg = __raw_readl(b15_rac_base + RAC_CONFIG0_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) 	for_each_possible_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 		en_mask |= ((1 << RACPREFDATA_SHIFT) << (cpu * RAC_CPU_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) 	WARN(reg & en_mask, "Read-ahead cache not previously disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 	b15_rac_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) 	set_bit(RAC_ENABLED, &b15_rac_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 	spin_unlock(&rac_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) 	pr_info("%pOF: Broadcom Brahma-B15 readahead cache\n", dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) out_cpu_dead:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) 	cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DYING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) out_unmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) 	unregister_reboot_notifier(&b15_rac_reboot_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) 	iounmap(b15_rac_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) 	of_node_put(dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) arch_initcall(b15_rac_init);