^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * ARC Cache Management
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/cache.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/mmu_context.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/syscalls.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/pagemap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <asm/cacheflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <asm/cachectl.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <asm/setup.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #ifdef CONFIG_ISA_ARCV2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #define USE_RGN_FLSH 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) static int l2_line_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) static int ioc_exists;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) int slc_enable = 1, ioc_enable = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) unsigned long sz, const int op, const int full_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) char *arc_cache_mumbojumbo(int c, char *buf, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) int n = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) struct cpuinfo_arc_cache *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #define PR_CACHE(p, cfg, str) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) if (!(p)->line_len) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) else \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) n += scnprintf(buf + n, len - n, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n", \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) (p)->sz_k, (p)->assoc, (p)->line_len, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) (p)->vipt ? "VIPT" : "PIPT", \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) (p)->alias ? " aliasing" : "", \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) IS_USED_CFG(cfg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) p = &cpuinfo_arc700[c].slc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) if (p->line_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) n += scnprintf(buf + n, len - n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) "SLC\t\t: %uK, %uB Line%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) p->sz_k, p->line_len, IS_USED_RUN(slc_enable));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) perip_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) return buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) }
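
/*
 * Illustrative only - with a hypothetical geometry (32K/2-way/64B I-cache,
 * 32K/4-way/64B D-cache, 256K/128B SLC) the lines built above would read
 * roughly as follows (IS_USED_CFG()/IS_USED_RUN() suffixes omitted):
 *
 *   I-Cache		: 32K, 2way/set, 64B Line, VIPT
 *   D-Cache		: 32K, 4way/set, 64B Line, PIPT
 *   SLC		: 256K, 128B Line
 */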
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) * Read the Cache Build Configuration Registers, decode them and save into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) * the cpuinfo structure for later use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) * No validation is done here: we simply read and convert the BCRs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) static void read_decode_cache_bcr_arcv2(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) struct cpuinfo_arc_cache *p_slc = &cpuinfo_arc700[cpu].slc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) struct bcr_generic sbcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) struct bcr_slc_cfg {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) #ifdef CONFIG_CPU_BIG_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) unsigned int pad:24, way:2, lsz:2, sz:4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) unsigned int sz:4, lsz:2, way:2, pad:24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) } slc_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) struct bcr_clust_cfg {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) #ifdef CONFIG_CPU_BIG_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) } cbcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) struct bcr_volatile {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) #ifdef CONFIG_CPU_BIG_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) unsigned int start:4, limit:4, pad:22, order:1, disable:1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) unsigned int disable:1, order:1, pad:22, limit:4, start:4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) } vol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) READ_BCR(ARC_REG_SLC_BCR, sbcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) if (sbcr.ver) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) p_slc->sz_k = 128 << slc_cfg.sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) if (cbcr.c) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) ioc_exists = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) * As of today we don't support IOC and ZONE_HIGHMEM enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) * simultaneously, because the IOC aperture covers only ZONE_NORMAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) * (low mem) and any dma transactions outside this region won't be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) * HW coherent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) * If we want to use both IOC and ZONE_HIGHMEM we can use a bounce
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) * buffer to handle dma transactions to HIGHMEM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) * It is also possible to modify the dma_direct cache ops or increase the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) * IOC aperture size if we plan to use HIGHMEM without PAE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) if (IS_ENABLED(CONFIG_HIGHMEM) || is_pae40_enabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) ioc_enable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) ioc_enable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) /* HS 2.0 didn't have AUX_VOL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) if (cpuinfo_arc700[cpu].core.family > 0x51) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) READ_BCR(AUX_VOL, vol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) perip_base = vol.start << 28;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) /* HS 3.0 has limit and strict-ordering fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) if (cpuinfo_arc700[cpu].core.family > 0x52)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) perip_end = (vol.limit << 28) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) }
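
/*
 * Worked example of the decode above, with hypothetical BCR contents:
 *   slc_cfg.sz  = 1  ->  sz_k = 128 << 1 = 256K
 *   slc_cfg.lsz = 1  ->  line_len = 64 (a value of 0 would mean 128)
 *   vol.start = 0xc  ->  perip_base = 0xc << 28 = 0xc0000000
 *   vol.limit = 0xf  ->  perip_end  = (0xf << 28) - 1 = 0xefffffff
 */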
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) void read_decode_cache_bcr(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) struct cpuinfo_arc_cache *p_ic, *p_dc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) unsigned int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) struct bcr_cache {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) #ifdef CONFIG_CPU_BIG_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) } ibcr, dbcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) p_ic = &cpuinfo_arc700[cpu].icache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) READ_BCR(ARC_REG_IC_BCR, ibcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) if (!ibcr.ver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) goto dc_chk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) if (ibcr.ver <= 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) BUG_ON(ibcr.config != 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) p_ic->assoc = 2; /* Fixed to 2w set assoc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) } else if (ibcr.ver >= 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) p_ic->assoc = 1 << ibcr.config; /* 1,2,4,8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) p_ic->line_len = 8 << ibcr.line_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) p_ic->sz_k = 1 << (ibcr.sz - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) p_ic->vipt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) dc_chk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) p_dc = &cpuinfo_arc700[cpu].dcache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) READ_BCR(ARC_REG_DC_BCR, dbcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) if (!dbcr.ver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) goto slc_chk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) if (dbcr.ver <= 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) BUG_ON(dbcr.config != 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) p_dc->assoc = 4; /* Fixed to 4w set assoc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) p_dc->vipt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) } else if (dbcr.ver >= 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) p_dc->assoc = 1 << dbcr.config; /* 1,2,4,8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) p_dc->vipt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) p_dc->alias = 0; /* PIPT so can't VIPT alias */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) p_dc->line_len = 16 << dbcr.line_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) p_dc->sz_k = 1 << (dbcr.sz - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) slc_chk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) if (is_isa_arcv2())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) read_decode_cache_bcr_arcv2(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) }
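
/*
 * Worked example of the decode above, with a hypothetical ibcr value:
 *   ibcr.ver = 4, ibcr.config = 1  ->  assoc = 1 << 1 = 2 ways
 *   ibcr.line_len = 3              ->  line_len = 8 << 3 = 64B
 *   ibcr.sz = 6                    ->  sz_k = 1 << (6 - 1) = 32K
 *   alias: 32 / 2 / TO_KB(8K page) = 2 > 1, so this I-cache can alias
 */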
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) * Line Operation on {I,D}-Cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) #define OP_INV 0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) #define OP_FLUSH 0x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) #define OP_FLUSH_N_INV 0x3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) #define OP_INV_IC 0x4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) * I-Cache Aliasing in ARC700 VIPT caches (MMU v1-v3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) * The orig Cache Management Module "CDU" only required paddr to invalidate a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) * In fact for distinct V1,V2,P: all of {V1-P},{V2-P},{P-P} would end up fetching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) * the exact same line.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) * paddr alone could not be used to correctly index the cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) * ------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) * MMU v1/v2 (Fixed Page Size 8k)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) * ------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) * The solution was to provide CDU with these additional vaddr bits. These
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) * would be bits [x:13], x would depend on cache-geometry, 13 comes from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) * standard page size of 8k.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) * H/w folks chose [17:13] to be a future-safe range, and moreover these 5 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) * orig 5 bits of paddr were anyway ignored by CDU line ops, as they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) * represent the offset within a cache-line. The advantage of this "clumsy"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) * interface for the additional info was that no new reg was needed in the CDU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) * programming model.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) * 17:13 represented the max num of bits passable, actual bits needed were
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) * fewer, based on the num-of-aliases possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) * -for 2 alias possibility, only bit 13 needed (32K cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) * -for 4 alias possibility, bits 14:13 needed (64K cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) * ------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) * MMU v3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) * ------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) * This ver of MMU supports variable page sizes (1k-16k): although Linux only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) * supports 8k (default), 16k and 4k.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) * However from hardware perspective, smaller page sizes aggravate aliasing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) * meaning more vaddr bits needed to disambiguate the cache-line-op ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) * the existing scheme of piggybacking won't work for certain configurations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) * Two new registers, IC_PTAG and DC_PTAG, were introduced.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) */
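
/*
 * Illustrative helper (a sketch for exposition only, not used by the code
 * below): how many distinct "colours" (aliases of one paddr) a VIPT cache
 * has. For a hypothetical 64K, 2-way I-cache with 8K pages it returns 4,
 * i.e. the "4 alias possibility, bits 14:13 needed" case above.
 */
static inline unsigned int arc_vipt_num_colours(unsigned int sz_k,
						unsigned int assoc)
{
	return (sz_k * 1024 / assoc) / PAGE_SIZE;
}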
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) unsigned long sz, const int op, const int full_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) unsigned int aux_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) int num_lines;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) if (op == OP_INV_IC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) aux_cmd = ARC_REG_IC_IVIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) /* Ensure we properly floor/ceil the non-line aligned/sized requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) * and have @paddr - aligned to cache line and integral @num_lines.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) * This however can be avoided for page-sized requests since:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) * -@paddr will be cache-line aligned already (being page aligned)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) * -@sz will be integral multiple of line size (being page sized).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) if (!full_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) sz += paddr & ~CACHE_LINE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) paddr &= CACHE_LINE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) vaddr &= CACHE_LINE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) /* MMUv2 and before: paddr contains stuffed vaddrs bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) while (num_lines-- > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) write_aux_reg(aux_cmd, paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) paddr += L1_CACHE_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) }
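
/*
 * Worked example of the floor/ceil fixup above, with hypothetical values
 * (64B lines): paddr = 0x80001234, sz = 0x100
 *   sz    += 0x1234 & 0x3f (= 0x34)  ->  0x134
 *   paddr &= ~0x3f                   ->  0x80001200
 *   num_lines = DIV_ROUND_UP(0x134, 64) = 5 lines
 */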
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) * For ARC700 MMUv3 I-cache and D-cache flushes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) * - ARC700 programming model requires paddr and vaddr be passed in separate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) * AUX registers (*_IV*L and *_PTAG respectively) irrespective of whether the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) * caches actually alias or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) * - For HS38, only the aliasing I-cache configuration uses the PTAG reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) * (non aliasing I-cache version doesn't; while D-cache can't possibly alias)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) unsigned long sz, const int op, const int full_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) unsigned int aux_cmd, aux_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) int num_lines;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) if (op == OP_INV_IC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) aux_cmd = ARC_REG_IC_IVIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) aux_tag = ARC_REG_IC_PTAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) aux_tag = ARC_REG_DC_PTAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) /* Ensure we properly floor/ceil the non-line aligned/sized requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) * and have @paddr - aligned to cache line and integral @num_lines.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) * This however can be avoided for page-sized requests since:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) * -@paddr will be cache-line aligned already (being page aligned)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) * -@sz will be integral multiple of line size (being page sized).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) if (!full_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) sz += paddr & ~CACHE_LINE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) paddr &= CACHE_LINE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) vaddr &= CACHE_LINE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) * MMUv3: cache ops require paddr in the PTAG reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) * If V->P is constant for the whole loop (full page), PTAG can be written once, outside it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) if (full_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) write_aux_reg(aux_tag, paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) * This is technically for MMU v4, using the MMU v3 programming model
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) * Special work for HS38 aliasing I-cache configuration with PAE40
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) * - upper 8 bits of paddr need to be written into PTAG_HI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) * - (and needs to be written before the lower 32 bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) * Note that PTAG_HI is hoisted outside the line loop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) if (is_pae40_enabled() && op == OP_INV_IC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) while (num_lines-- > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) if (!full_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) write_aux_reg(aux_tag, paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) paddr += L1_CACHE_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) write_aux_reg(aux_cmd, vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) vaddr += L1_CACHE_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) #ifndef USE_RGN_FLSH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) * In HS38x (MMU v4), I-cache is VIPT (can alias), D-cache is PIPT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) * Here's how cache ops are implemented
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) * - D-cache: only paddr needed (in DC_IVDL/DC_FLDL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) * - I-cache Non Aliasing: Despite VIPT, only paddr needed (in IC_IVIL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) * - I-cache Aliasing: Both vaddr and paddr needed (in IC_IVIL, IC_PTAG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) * respectively, similar to MMU v3 programming model, hence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) * __cache_line_loop_v3() is used)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) * If PAE40 is enabled, independent of aliasing considerations, the higher bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) * need to be written into PTAG_HI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) unsigned long sz, const int op, const int full_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) unsigned int aux_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) int num_lines;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) if (op == OP_INV_IC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) aux_cmd = ARC_REG_IC_IVIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) /* Ensure we properly floor/ceil the non-line aligned/sized requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) * and have @paddr - aligned to cache line and integral @num_lines.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) * This however can be avoided for page-sized requests since:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) * -@paddr will be cache-line aligned already (being page aligned)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) * -@sz will be integral multiple of line size (being page sized).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) if (!full_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) sz += paddr & ~CACHE_LINE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) paddr &= CACHE_LINE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) * For HS38 PAE40 configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) * - upper 8 bits of paddr need to be written into PTAG_HI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) * - (and needs to be written before the lower 32 bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) if (is_pae40_enabled()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) if (op == OP_INV_IC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) * Non aliasing I-cache in HS38,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) * aliasing I-cache handled in __cache_line_loop_v3()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) while (num_lines-- > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) write_aux_reg(aux_cmd, paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) paddr += L1_CACHE_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) }
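
/*
 * Worked example of the PAE40 split above, with a hypothetical 40-bit
 * paddr of 0x1_20000040: PTAG_HI is written with 0x01 (bits [39:32])
 * before the loop, while each IVIL/IVDL/FLDL write below carries only
 * the lower 32 bits (0x20000040, 0x20000080, ...).
 */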
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) * optimized flush operation which takes a region as opposed to iterating per line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) unsigned long sz, const int op, const int full_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) unsigned int s, e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) /* Only for Non aliasing I-cache in HS38 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) if (op == OP_INV_IC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) s = ARC_REG_IC_IVIR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) e = ARC_REG_IC_ENDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) s = ARC_REG_DC_STARTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) e = ARC_REG_DC_ENDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) if (!full_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) /* for any leading gap between @paddr and start of cache line */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) sz += paddr & ~CACHE_LINE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) paddr &= CACHE_LINE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) * account for any trailing gap to end of cache line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) * this is equivalent to DIV_ROUND_UP() in line ops above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) sz += L1_CACHE_BYTES - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) if (is_pae40_enabled()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) /* TBD: check if crossing 4TB boundary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) if (op == OP_INV_IC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) /* ENDR needs to be set ahead of START */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) write_aux_reg(e, paddr + sz); /* ENDR is exclusive */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) write_aux_reg(s, paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) /* caller waits on DC_CTRL.FS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) }
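
/*
 * Worked example of the region setup above, with hypothetical values
 * (64B lines): paddr = 0x80000040 (already line aligned), sz = 0xc0
 *   sz += 63 (trailing-gap round up)          ->  0xff
 *   ENDR   <- 0x80000040 + 0xff = 0x8000013f     (written first, exclusive)
 *   STARTR <- 0x80000040                         (written last)
 */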
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) #if (CONFIG_ARC_MMU_VER < 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) #define __cache_line_loop __cache_line_loop_v2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) #elif (CONFIG_ARC_MMU_VER == 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) #define __cache_line_loop __cache_line_loop_v3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) #elif (CONFIG_ARC_MMU_VER > 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) #define __cache_line_loop __cache_line_loop_v4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) #ifdef CONFIG_ARC_HAS_DCACHE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) /***************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) * Machine specific helpers for Entire D-Cache or Per Line ops
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) #ifndef USE_RGN_FLSH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) * this version avoids extra read/write of DC_CTRL for flush or invalidate ops
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) * in the non region flush regime (such as for ARCompact)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) static inline void __before_dc_op(const int op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) if (op == OP_FLUSH_N_INV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) /* Dcache provides 2 cmd: FLUSH or INV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) * flush-n-inv is achieved by INV cmd but with IM=1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) * So toggle INV sub-mode depending on op request and default
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) const unsigned int ctl = ARC_REG_DC_CTRL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) static inline void __before_dc_op(const int op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) const unsigned int ctl = ARC_REG_DC_CTRL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) unsigned int val = read_aux_reg(ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) if (op == OP_FLUSH_N_INV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) val |= DC_CTRL_INV_MODE_FLUSH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) if (op != OP_INV_IC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) * Flush / Invalidate is provided by DC_CTRL.RNG_OP 0 or 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) * combined Flush-n-invalidate uses DC_CTRL.IM = 1 set above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) val &= ~DC_CTRL_RGN_OP_MSK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) if (op & OP_INV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) val |= DC_CTRL_RGN_OP_INV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) write_aux_reg(ctl, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) static inline void __after_dc_op(const int op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) if (op & OP_FLUSH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) const unsigned int ctl = ARC_REG_DC_CTRL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) unsigned int reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) /* flush / flush-n-inv both wait */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) /* Switch back to default Invalidate mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) if (op == OP_FLUSH_N_INV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) }
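
/*
 * Putting the two halves together (sketch): for OP_FLUSH_N_INV,
 * __before_dc_op() sets DC_CTRL.IM, each IVDL write then does
 * wback-n-discard for its line, and __after_dc_op() spins on the flush
 * status bit before restoring the default invalidate mode.
 */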
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) * Operation on Entire D-Cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) * Note that constant propagation ensures all the checks are gone
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) * in generated code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) static inline void __dc_entire_op(const int op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) int aux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) __before_dc_op(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) if (op & OP_INV) /* Inv or flush-n-inv use same cmd reg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) aux = ARC_REG_DC_IVDC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) aux = ARC_REG_DC_FLSH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) write_aux_reg(aux, 0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) __after_dc_op(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) static inline void __dc_disable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) const int r = ARC_REG_DC_CTRL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) __dc_entire_op(OP_FLUSH_N_INV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) write_aux_reg(r, read_aux_reg(r) | DC_CTRL_DIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) static void __dc_enable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) const int r = ARC_REG_DC_CTRL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) write_aux_reg(r, read_aux_reg(r) & ~DC_CTRL_DIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) /* For kernel mappings cache operation: index is same as paddr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) #define __dc_line_op_k(p, sz, op) __dc_line_op(p, p, sz, op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) unsigned long sz, const int op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) __before_dc_op(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) __cache_line_loop(paddr, vaddr, sz, op, full_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) __after_dc_op(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) }
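
/*
 * Hypothetical usage sketch for a kernel/identity mapping, e.g. writing back
 * a buffer before a device reads it:
 *
 *	__dc_line_op_k(paddr, size, OP_FLUSH);
 *
 * which is __dc_line_op(paddr, paddr, size, OP_FLUSH), since index and tag
 * both derive from paddr for kernel mappings.
 */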
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) #define __dc_entire_op(op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) #define __dc_disable()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) #define __dc_enable()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) #define __dc_line_op(paddr, vaddr, sz, op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) #define __dc_line_op_k(paddr, sz, op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) #endif /* CONFIG_ARC_HAS_DCACHE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) #ifdef CONFIG_ARC_HAS_ICACHE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) static inline void __ic_entire_inv(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) write_aux_reg(ARC_REG_IC_IVIC, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) read_aux_reg(ARC_REG_IC_CTRL); /* blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) __ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) unsigned long sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) (*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC, full_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) #ifndef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) #define __ic_line_inv_vaddr(p, v, s) __ic_line_inv_vaddr_local(p, v, s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) struct ic_inv_args {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) phys_addr_t paddr, vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) int sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) static void __ic_line_inv_vaddr_helper(void *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) struct ic_inv_args *ic_inv = info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) unsigned long sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) struct ic_inv_args ic_inv = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) .paddr = paddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) .vaddr = vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) .sz = sz
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) #endif /* CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) #else /* !CONFIG_ARC_HAS_ICACHE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) #define __ic_entire_inv()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) #define __ic_line_inv_vaddr(pstart, vstart, sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) #endif /* CONFIG_ARC_HAS_ICACHE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) #ifdef CONFIG_ISA_ARCV2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) * SLC is shared between all cores and concurrent aux operations from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) * multiple cores need to be serialized using a spinlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) * A concurrent operation can be silently ignored and/or the old/new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) * below)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) static DEFINE_SPINLOCK(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) unsigned int ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) phys_addr_t end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) spin_lock_irqsave(&lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) * - b'000 (default) is Flush,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) * - b'001 is Invalidate if CTRL.IM == 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) * - b'001 is Flush-n-Invalidate if CTRL.IM == 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) ctrl = read_aux_reg(ARC_REG_SLC_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) /* Don't rely on default value of IM bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) if (!(op & OP_FLUSH)) /* i.e. OP_INV */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) ctrl &= ~SLC_CTRL_IM; /* clear IM: Disable flush before Inv */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) ctrl |= SLC_CTRL_IM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) if (op & OP_INV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) ctrl |= SLC_CTRL_RGN_OP_INV; /* Inv or flush-n-inv */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) ctrl &= ~SLC_CTRL_RGN_OP_INV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) write_aux_reg(ARC_REG_SLC_CTRL, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) * Lower bits are ignored, no need to clip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) * END needs to be setup before START (latter triggers the operation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) * END can't be same as START, so add (l2_line_sz - 1) to sz
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) end = paddr + sz + l2_line_sz - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) if (is_pae40_enabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) if (is_pae40_enabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) /* Make sure "busy" bit reports correct status, see STAR 9001165532 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) read_aux_reg(ARC_REG_SLC_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) spin_unlock_irqrestore(&lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) }
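
/*
 * Worked example of the END computation above, with hypothetical values
 * (128B SLC lines): paddr = 0x80000000, sz = 0x100
 *   end = 0x80000000 + 0x100 + 127 = 0x8000017f
 * so END always lands past START, even for a zero-length request.
 */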
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) #ifdef CONFIG_ISA_ARCV2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) * SLC is shared between all cores and concurrent aux operations from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) * multiple cores need to be serialized using a spinlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) * A concurrent operation can be silently ignored and/or the old/new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) * below)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) static DEFINE_SPINLOCK(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) const unsigned long SLC_LINE_MASK = ~(l2_line_sz - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) unsigned int ctrl, cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) int num_lines;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) spin_lock_irqsave(&lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) ctrl = read_aux_reg(ARC_REG_SLC_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) /* Don't rely on default value of IM bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) if (!(op & OP_FLUSH)) /* i.e. OP_INV */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) ctrl &= ~SLC_CTRL_IM; /* clear IM: Disable flush before Inv */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) ctrl |= SLC_CTRL_IM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) write_aux_reg(ARC_REG_SLC_CTRL, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) cmd = op & OP_INV ? ARC_AUX_SLC_IVDL : ARC_AUX_SLC_FLDL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) sz += paddr & ~SLC_LINE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) paddr &= SLC_LINE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) num_lines = DIV_ROUND_UP(sz, l2_line_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) while (num_lines-- > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) write_aux_reg(cmd, paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) paddr += l2_line_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) /* Make sure "busy" bit reports correct status, see STAR 9001165532 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) read_aux_reg(ARC_REG_SLC_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) spin_unlock_irqrestore(&lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) #define slc_op(paddr, sz, op) slc_op_rgn(paddr, sz, op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) static noinline void slc_entire_op(const int op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) unsigned int ctrl, r = ARC_REG_SLC_CTRL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) ctrl = read_aux_reg(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) if (!(op & OP_FLUSH)) /* i.e. OP_INV */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) ctrl &= ~SLC_CTRL_IM; /* clear IM: Disable flush before Inv */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) ctrl |= SLC_CTRL_IM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) write_aux_reg(r, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) if (op & OP_INV) /* Inv or flush-n-inv use same cmd reg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) write_aux_reg(ARC_REG_SLC_INVALIDATE, 0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) write_aux_reg(ARC_REG_SLC_FLUSH, 0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) /* Make sure "busy" bit reports correct status, see STAR 9001165532 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) read_aux_reg(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) /* Important to wait for flush to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) while (read_aux_reg(r) & SLC_CTRL_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) static inline void arc_slc_disable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) const int r = ARC_REG_SLC_CTRL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
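/*
 * Flush + invalidate the whole SLC before setting the disable bit,
 * presumably so no dirty lines are left stranded once the cache is bypassed.
 */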
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) slc_entire_op(OP_FLUSH_N_INV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) static inline void arc_slc_enable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) const int r = ARC_REG_SLC_CTRL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) /***********************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) * Exported APIs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) * Handle cache congruency of kernel and userspace mappings of page when kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) * writes-to/reads-from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * The idea is to defer flushing of the kernel mapping after a WRITE, which is possible if:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) * -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) * -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) * -In SMP, if hardware caches are coherent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) * There's a corollary case where the kernel READs from a userspace-mapped page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) * If the U-mapping is not congruent to the K-mapping, the former needs flushing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) */
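/*
 * Illustrative scenario (hypothetical): a pagecache page written by the
 * kernel via write() before any mmap() of the file exists - only
 * PG_dc_clean is cleared here, and the actual flush is deferred until
 * update_mmu_cache() wires in the first U-mapping.
 */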
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) void flush_dcache_page(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) struct address_space *mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (!cache_is_vipt_aliasing()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) clear_bit(PG_dc_clean, &page->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) /* don't handle anon pages here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) mapping = page_mapping_file(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) if (!mapping)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * pagecache page, file not yet mapped to userspace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * Make a note that K-mapping is dirty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) if (!mapping_mapped(mapping)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) clear_bit(PG_dc_clean, &page->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) } else if (page_mapcount(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) /* kernel reading from page with U-mapping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) phys_addr_t paddr = (unsigned long)page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) unsigned long vaddr = page->index << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) if (addr_not_cache_congruent(paddr, vaddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) __flush_dcache_page(paddr, vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) EXPORT_SYMBOL(flush_dcache_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) * DMA ops for systems with L1 cache only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * Make memory coherent with L1 cache by flushing/invalidating L1 lines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) static void __dma_cache_wback_inv_l1(phys_addr_t start, unsigned long sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) __dc_line_op_k(start, sz, OP_FLUSH_N_INV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) static void __dma_cache_inv_l1(phys_addr_t start, unsigned long sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) __dc_line_op_k(start, sz, OP_INV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) static void __dma_cache_wback_l1(phys_addr_t start, unsigned long sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) __dc_line_op_k(start, sz, OP_FLUSH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) * DMA ops for systems with both L1 and L2 caches, but without IOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) * Both L1 and L2 lines need to be explicitly flushed/invalidated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) static void __dma_cache_wback_inv_slc(phys_addr_t start, unsigned long sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) __dc_line_op_k(start, sz, OP_FLUSH_N_INV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) slc_op(start, sz, OP_FLUSH_N_INV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) static void __dma_cache_inv_slc(phys_addr_t start, unsigned long sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) __dc_line_op_k(start, sz, OP_INV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) slc_op(start, sz, OP_INV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) __dc_line_op_k(start, sz, OP_FLUSH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) slc_op(start, sz, OP_FLUSH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) * Exported DMA API
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) __dma_cache_wback_inv(start, sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) EXPORT_SYMBOL(dma_cache_wback_inv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) void dma_cache_inv(phys_addr_t start, unsigned long sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) __dma_cache_inv(start, sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) EXPORT_SYMBOL(dma_cache_inv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) void dma_cache_wback(phys_addr_t start, unsigned long sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) __dma_cache_wback(start, sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) EXPORT_SYMBOL(dma_cache_wback);
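/*
 * For reference, these are reached via the streaming DMA path noted in
 * arc_cache_init_master() below:
 *   arch_sync_dma_for_cpu() (and its _for_device counterpart) ->
 *   dma_cache_*() -> __dma_cache_*()
 * so the function pointer selected at init time (L1-only vs L1+SLC) does
 * the actual work.
 */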
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * This is the API for making I/D caches consistent when modifying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) * kernel code (loadable modules, kprobes, kgdb...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) * This is called on insmod, with the kernel virtual address for the CODE of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * the module. ARC cache maintenance ops require a PHY address, thus we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * need to convert the vmalloc addr to a PHY addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) void flush_icache_range(unsigned long kstart, unsigned long kend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) unsigned int tot_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) /* Shortcut for bigger flush ranges.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * Here we don't care whether this was a kernel virtual or a phy addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) tot_sz = kend - kstart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (tot_sz > PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) flush_cache_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) /* Case: Kernel Phy addr (0x8000_0000 onwards) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (likely(kstart > PAGE_OFFSET)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) * The 2nd arg despite being paddr will be used to index icache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) * This is OK since no alternate virtual mappings will exist
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) * given the callers for this case: kprobe/kgdb in built-in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) * kernel code only.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) __sync_icache_dcache(kstart, kstart, kend - kstart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) * (1) ARC Cache Maintenance ops only take Phy addr, hence special
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) * handling of kernel vaddr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) * it still needs to handle the case where the range straddles two
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * virtual pages, hence the loop below (see the worked example that follows)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) */
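/*
 * Worked example (hypothetical, assuming 8K pages): kstart = 0x7000_1f80,
 * tot_sz = 0x100. Iteration 1: off = 0x1f80, sz = 0x80 (rest of that page).
 * Iteration 2: off = 0, sz = 0x80 on the next virtual page, each iteration
 * doing its own vmalloc_to_pfn() translation.
 */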
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) while (tot_sz > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) unsigned int off, sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) unsigned long phy, pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) off = kstart % PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) pfn = vmalloc_to_pfn((void *)kstart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) phy = (pfn << PAGE_SHIFT) + off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) __sync_icache_dcache(phy, kstart, sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) kstart += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) tot_sz -= sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) EXPORT_SYMBOL(flush_icache_range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) * General purpose helper to make I and D cache lines consistent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) * @paddr is phy addr of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) * However, in one instance, when called by kprobe (for a breakpoint in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * builtin kernel code), @vaddr will be a paddr only, meaning the CDU operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) * will use a paddr to index the cache (despite VIPT). This is fine since a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) * builtin kernel page will not have any virtual mappings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) * A kprobe on a loadable module will be a kernel vaddr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) {
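/* Write back + invalidate D$ lines first so the I$ invalidate below makes refills fetch the just-written code from memory */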
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) __dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) __ic_line_inv_vaddr(paddr, vaddr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) /* wrapper to compile time eliminate alignment checks in flush loop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) __ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) * wrapper to clearout kernel or userspace mappings of a page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * For kernel mappings @vaddr == @paddr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) __dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) noinline void flush_cache_all(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) __ic_entire_inv();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) __dc_entire_op(OP_FLUSH_N_INV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) #ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) void flush_cache_mm(struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) flush_cache_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) unsigned long pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) phys_addr_t paddr = pfn << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) u_vaddr &= PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) __flush_dcache_page(paddr, u_vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if (vma->vm_flags & VM_EXEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) __inv_icache_page(paddr, u_vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) flush_cache_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) void flush_anon_page(struct vm_area_struct *vma, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) unsigned long u_vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) /* TBD: do we really need to clear the kernel mapping? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) __flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) __flush_dcache_page((phys_addr_t)page_address(page),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) (phys_addr_t)page_address(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) void copy_user_highpage(struct page *to, struct page *from,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) unsigned long u_vaddr, struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) void *kfrom = kmap_atomic(from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) void *kto = kmap_atomic(to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) int clean_src_k_mappings = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) * If the SRC page was already mapped in userspace AND its U-mapping is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) * not congruent with the K-mapping, sync the former to the physical page so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) * that the K-mapping in the memcpy below sees the right data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) * equally valid for SRC page as well
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) * For !VIPT cache, all of this gets compiled out as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) * addr_not_cache_congruent() is 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) __flush_dcache_page((unsigned long)kfrom, u_vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) clean_src_k_mappings = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) copy_page(kto, kfrom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) * Mark DST page K-mapping as dirty for a later finalization by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) * update_mmu_cache(). Although the finalization could have been done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) * here as well (given that both vaddr/paddr are available).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) * But update_mmu_cache() already has code to do that for other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) * non-copied user pages (e.g. read faults which wire in the pagecache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) * page directly).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) clear_bit(PG_dc_clean, &to->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) * if SRC was already usermapped and non-congruent to kernel mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) * sync the kernel mapping back to physical page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) if (clean_src_k_mappings) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) __flush_dcache_page((unsigned long)kfrom, (unsigned long)kfrom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) set_bit(PG_dc_clean, &from->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) clear_bit(PG_dc_clean, &from->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) kunmap_atomic(kto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) kunmap_atomic(kfrom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) clear_page(to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) clear_bit(PG_dc_clean, &page->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) EXPORT_SYMBOL(clear_user_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) * Explicit Cache flush request from user space via syscall
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) * Needed for JITs which generate code on the fly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) /* TBD: optimize this */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) flush_cache_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) * IO-Coherency (IOC) setup rules:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) * 1. Needs to be at system level, so only once by Master core
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * Non-Masters need not be accessing caches at that time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) * - They are either HALT_ON_RESET and kick started much later or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) * - if run on reset, need to ensure that arc_platform_smp_wait_to_boot()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) * doesn't perturb caches or coherency unit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) * 2. caches (L1 and SLC) need to be purged (flush+inv) before setting up IOC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) * otherwise any straggler data might behave strangely post IOC enabling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) * 3. All Caches need to be disabled when setting up IOC to elide any in-flight
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) * Coherency transactions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) noinline void __init arc_ioc_setup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) unsigned int ioc_base, mem_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) * If IOC was already enabled (due to bootloader), it technically needs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) * be reconfigured with an aperture base/size corresponding to the Linux memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) * map, which will certainly differ from u-boot's. But disabling and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) * re-enabling IOC while DMA might potentially be active is tricky business.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) * To avoid random memory issues later, just panic here and ask the user to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) * upgrade the bootloader to one which doesn't enable IOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) if (read_aux_reg(ARC_REG_IO_COH_ENABLE) & ARC_IO_COH_ENABLE_BIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) panic("IOC already enabled, please upgrade bootloader!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) if (!ioc_enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) /* Flush + invalidate + disable L1 dcache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) __dc_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) /* Flush + invalidate SLC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) if (read_aux_reg(ARC_REG_SLC_BCR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) slc_entire_op(OP_FLUSH_N_INV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * currently IOC Aperture covers entire DDR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * TBD: fix for PGU + 1GB of low mem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * TBD: fix for PAE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) mem_sz = arc_get_mem_sz();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) if (!is_power_of_2(mem_sz) || mem_sz < 4096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) panic("IOC Aperture size must be power of 2 larger than 4KB");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) * IOC Aperture size decoded as 2 ^ (SIZE + 2) KB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) * so setting 0x11 implies 512MB, 0x12 implies 1GB...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, order_base_2(mem_sz >> 10) - 2);
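/*
 * Worked example (hypothetical): for mem_sz = 512 MB, mem_sz >> 10 = 512K
 * = 2^19, so order_base_2() gives 19 and the register is programmed with
 * 19 - 2 = 17 = 0x11, matching the "0x11 implies 512MB" encoding above.
 */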
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) /* for now assume kernel base is start of IOC aperture */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) ioc_base = CONFIG_LINUX_RAM_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) if (ioc_base % mem_sz != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) panic("IOC Aperture start must be aligned to the size of the aperture");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
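/* The aperture base appears to be programmed in 4 KB units, hence the >> 12 */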
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) write_aux_reg(ARC_REG_IO_COH_AP0_BASE, ioc_base >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) write_aux_reg(ARC_REG_IO_COH_PARTIAL, ARC_IO_COH_PARTIAL_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) write_aux_reg(ARC_REG_IO_COH_ENABLE, ARC_IO_COH_ENABLE_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) /* Re-enable L1 dcache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) __dc_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) * Cache related boot time checks/setups only needed on master CPU:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) * - Geometry checks (kernel build and hardware agree: e.g. L1_CACHE_BYTES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) * Assumes a symmetric config, so all cores have the same cache geometry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) * a check on one core suffices for all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) * - IOC setup / dma callbacks only need to be done once
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) void __init arc_cache_init_master(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) unsigned int __maybe_unused cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) if (!ic->line_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) panic("cache support enabled but non-existent cache\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (ic->line_len != L1_CACHE_BYTES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) panic("ICache line [%d] != kernel Config [%d]",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) ic->line_len, L1_CACHE_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) * pair to provide vaddr/paddr respectively, just as in MMU v3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) if (is_isa_arcv2() && ic->alias)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) _cache_line_loop_ic_fn = __cache_line_loop_v3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) _cache_line_loop_ic_fn = __cache_line_loop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (!dc->line_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) panic("cache support enabled but non-existent cache\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if (dc->line_len != L1_CACHE_BYTES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) panic("DCache line [%d] != kernel Config [%d]",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) dc->line_len, L1_CACHE_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) /* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) if (is_isa_arcompact()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) int num_colors = dc->sz_k/dc->assoc/TO_KB(PAGE_SIZE);
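/*
 * Example (hypothetical geometry): a 64K, 4-way dcache with 8K pages gives
 * 64/4/8 = 2 colors, i.e. a page can be indexed into one of two page-sized
 * cache slices depending on its vaddr, which CACHE_COLORS_NUM must match.
 */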
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) if (dc->alias) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if (!handled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) if (CACHE_COLORS_NUM != num_colors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) panic("CACHE_COLORS_NUM not optimized for config\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) } else if (!dc->alias && handled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) * Check that SMP_CACHE_BYTES (and hence ARCH_DMA_MINALIGN) is larger
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) * or equal to any cache line length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) BUILD_BUG_ON_MSG(L1_CACHE_BYTES > SMP_CACHE_BYTES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) "SMP_CACHE_BYTES must be >= any cache line length");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) if (is_isa_arcv2() && (l2_line_sz > SMP_CACHE_BYTES))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) panic("L2 Cache line [%d] > kernel Config [%d]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) l2_line_sz, SMP_CACHE_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) /* Note that SLC disable not formally supported till HS 3.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) if (is_isa_arcv2() && l2_line_sz && !slc_enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) arc_slc_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) if (is_isa_arcv2() && ioc_exists)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) arc_ioc_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) if (is_isa_arcv2() && l2_line_sz && slc_enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) __dma_cache_wback_inv = __dma_cache_wback_inv_slc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) __dma_cache_inv = __dma_cache_inv_slc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) __dma_cache_wback = __dma_cache_wback_slc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) __dma_cache_wback_inv = __dma_cache_wback_inv_l1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) __dma_cache_inv = __dma_cache_inv_l1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) __dma_cache_wback = __dma_cache_wback_l1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) * In case of IOC (say IOC+SLC case), pointers above could still be set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) * but end up not being relevant as the first function in chain is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) * called at all for devices using coherent DMA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) * arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) void __ref arc_cache_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) unsigned int __maybe_unused cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) char str[256];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) pr_info("%s", arc_cache_mumbojumbo(0, str, sizeof(str)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) if (!cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) arc_cache_init_master();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) * In PAE regime, TLB and cache maintenance ops take wider addresses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) * And even if PAE is not enabled in the kernel, the upper 32 bits still need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) * to be zeroed to keep the ops sane.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) * As an optimization for the more common case of PAE not being enabled, zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) * them out once at init, rather than checking/setting to 0 for every runtime op
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) if (is_isa_arcv2() && pae40_exist_but_not_enab()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) write_aux_reg(ARC_REG_IC_PTAG_HI, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) write_aux_reg(ARC_REG_DC_PTAG_HI, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) if (l2_line_sz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) write_aux_reg(ARC_REG_SLC_RGN_END1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) write_aux_reg(ARC_REG_SLC_RGN_START1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }