// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mm/cache-xsc3l2.c - XScale3 L2 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 */
#include <linux/init.h>
#include <linux/highmem.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>

#define CR_L2	(1 << 26)

#define CACHE_LINE_SIZE		32
#define CACHE_LINE_SHIFT	5
#define CACHE_WAY_PER_SET	8

#define CACHE_WAY_SIZE(l2ctype)	(8192 << (((l2ctype) >> 8) & 0xf))
#define CACHE_SET_SIZE(l2ctype)	(CACHE_WAY_SIZE(l2ctype) >> CACHE_LINE_SHIFT)

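/*
 * Read the L2 cache type register (CP15 c0, c0, 1 with opc1 = 1); a
 * non-zero value in bits [7:3] indicates that an L2 cache is present.
 */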
static inline int xsc3_l2_present(void)
{
	unsigned long l2ctype;

	__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

	return !!(l2ctype & 0xf8);
}

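/* Clean (write back) one L2 cache line by modified virtual address. */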
static inline void xsc3_l2_clean_mva(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c11, 1" : : "r" (addr));
}

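/* Invalidate one L2 cache line by modified virtual address. */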
static inline void xsc3_l2_inv_mva(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c7, 1" : : "r" (addr));
}

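/*
 * Invalidate the entire L2 cache by walking every set/way combination
 * derived from the L2 cache type register.
 */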
static inline void xsc3_l2_inv_all(void)
{
	unsigned long l2ctype, set_way;
	int set, way;

	__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

	for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) {
		for (way = 0; way < CACHE_WAY_PER_SET; way++) {
			set_way = (way << 29) | (set << 5);
			__asm__("mcr p15, 1, %0, c7, c11, 2" : : "r"(set_way));
		}
	}

	dsb();
}

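/*
 * The L2 maintenance operations above take virtual addresses.  With
 * highmem enabled, pages outside the linear mapping must be temporarily
 * mapped with kmap_atomic_pfn() before their lines can be cleaned or
 * invalidated; without highmem, the lowmem linear mapping is used directly.
 */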
static inline void l2_unmap_va(unsigned long va)
{
#ifdef CONFIG_HIGHMEM
	if (va != -1)
		kunmap_atomic((void *)va);
#endif
}

static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va)
{
#ifdef CONFIG_HIGHMEM
	unsigned long va = prev_va & PAGE_MASK;
	unsigned long pa_offset = pa << (32 - PAGE_SHIFT);
	if (unlikely(pa_offset < (prev_va << (32 - PAGE_SHIFT)))) {
		/*
		 * Switching to a new page.  Because cache ops are
		 * using virtual addresses only, we must put a mapping
		 * in place for it.
		 */
		l2_unmap_va(prev_va);
		va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT);
	}
	return va + (pa_offset >> (32 - PAGE_SHIFT));
#else
	return __phys_to_virt(pa);
#endif
}

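/*
 * Invalidate the L2 lines covering [start, end).  Partial lines at either
 * end of the range are cleaned before being invalidated so that unrelated
 * data sharing those lines is not lost.
 */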
static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
{
	unsigned long vaddr;

	if (start == 0 && end == -1ul) {
		xsc3_l2_inv_all();
		return;
	}

	vaddr = -1;		/* to force the first mapping */

	/*
	 * Clean and invalidate partial first cache line.
	 */
	if (start & (CACHE_LINE_SIZE - 1)) {
		vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr);
		xsc3_l2_clean_mva(vaddr);
		xsc3_l2_inv_mva(vaddr);
		start = (start | (CACHE_LINE_SIZE - 1)) + 1;
	}

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < (end & ~(CACHE_LINE_SIZE - 1))) {
		vaddr = l2_map_va(start, vaddr);
		xsc3_l2_inv_mva(vaddr);
		start += CACHE_LINE_SIZE;
	}

	/*
	 * Clean and invalidate partial last cache line.
	 */
	if (start < end) {
		vaddr = l2_map_va(start, vaddr);
		xsc3_l2_clean_mva(vaddr);
		xsc3_l2_inv_mva(vaddr);
	}

	l2_unmap_va(vaddr);

	dsb();
}

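/* Clean (write back) the L2 lines covering [start, end). */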
static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
{
	unsigned long vaddr;

	vaddr = -1;		/* to force the first mapping */

	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		vaddr = l2_map_va(start, vaddr);
		xsc3_l2_clean_mva(vaddr);
		start += CACHE_LINE_SIZE;
	}

	l2_unmap_va(vaddr);

	dsb();
}

/*
 * Flush (clean and invalidate) the entire L2 cache using set/way
 * operations, which is faster than walking the full address range.
 */
static inline void xsc3_l2_flush_all(void)
{
	unsigned long l2ctype, set_way;
	int set, way;

	__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

	for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) {
		for (way = 0; way < CACHE_WAY_PER_SET; way++) {
			set_way = (way << 29) | (set << 5);
			__asm__("mcr p15, 1, %0, c7, c15, 2" : : "r"(set_way));
		}
	}

	dsb();
}

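/* Clean and invalidate the L2 lines covering [start, end). */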
static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
{
	unsigned long vaddr;

	if (start == 0 && end == -1ul) {
		xsc3_l2_flush_all();
		return;
	}

	vaddr = -1;		/* to force the first mapping */

	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		vaddr = l2_map_va(start, vaddr);
		xsc3_l2_clean_mva(vaddr);
		xsc3_l2_inv_mva(vaddr);
		start += CACHE_LINE_SIZE;
	}

	l2_unmap_va(vaddr);

	dsb();
}

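/*
 * Hook the range operations into the outer_cache interface when running
 * on an XScale3 CPU whose L2 cache is present and enabled (CR_L2 set in
 * the control register).
 */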
static int __init xsc3_l2_init(void)
{
	if (!cpu_is_xsc3() || !xsc3_l2_present())
		return 0;

	if (get_cr() & CR_L2) {
		pr_info("XScale3 L2 cache enabled.\n");
		xsc3_l2_inv_all();

		outer_cache.inv_range = xsc3_l2_inv_range;
		outer_cache.clean_range = xsc3_l2_clean_range;
		outer_cache.flush_range = xsc3_l2_flush_range;
	}

	return 0;
}
core_initcall(xsc3_l2_init);