^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /* $Id: cache.h,v 1.6 2004/03/11 18:08:05 lethal Exp $
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * include/asm-sh/cache.h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright 1999 (C) Niibe Yutaka
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Copyright 2002, 2003 (C) Paul Mundt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #ifndef __ASM_SH_CACHE_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #define __ASM_SH_CACHE_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <cpu/cache.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #define __read_mostly __section(".data..read_mostly")
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #ifndef __ASSEMBLY__
/*
 * Geometry and addressing properties of one CPU cache, filled in by the
 * CPU probe code at boot (NOTE(review): the probe/consumer code is not in
 * this header — presumably arch/sh cache setup; verify).
 */
struct cache_info {
	unsigned int ways;		/* Number of cache ways */
	unsigned int sets;		/* Number of cache sets */
	unsigned int linesz;		/* Cache line size (bytes) */

	unsigned int way_size;		/* sets * line size */

	/*
	 * way_incr is the address offset for accessing the next way
	 * in memory mapped cache array ops.
	 */
	unsigned int way_incr;
	unsigned int entry_shift;	/* shift to a set's entry in the array */
	unsigned int entry_mask;	/* mask selecting the entry/set bits */

	/*
	 * Mask of the address bits that are used both to select the
	 * cache set during indexing AND lie inside the physical page
	 * number, i.e. the bits that can cause virtual cache aliases.
	 * (Computed from the fields above by the cache probe code.)
	 */
	unsigned int alias_mask;
	unsigned int n_aliases;		/* Number of aliases */

	/* Cache attribute flags — NOTE(review): bit definitions live
	 * elsewhere (cpu/cache.h?); confirm before documenting values. */
	unsigned long flags;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #endif /* __ASSEMBLY__ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #endif /* __ASM_SH_CACHE_H */