/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __UM_CACHE_H
#define __UM_CACHE_H

#if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
# define L1_CACHE_SHIFT		(CONFIG_X86_L1_CACHE_SHIFT)
#elif defined(CONFIG_UML_X86) /* 64-bit */
# define L1_CACHE_SHIFT		6 /* Should be 7 on Intel */
#else
/* XXX: this was taken from x86; now it's completely arbitrary. Luckily it
 * only affects SMP padding. */
# define L1_CACHE_SHIFT		5
#endif

#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

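/*
 * Illustrative note (not part of the original header): generic code picks
 * this value up via <linux/cache.h>, where SMP_CACHE_BYTES is defined as
 * L1_CACHE_BYTES and backs the ____cacheline_aligned annotations used to
 * pad shared data onto separate cache lines, e.g. (hypothetical struct):
 *
 *	struct example {
 *		spinlock_t lock;
 *	} ____cacheline_aligned_in_smp;
 *
 * With L1_CACHE_SHIFT == 5, such a structure is aligned and padded to a
 * 32-byte boundary, which is all the "SMP padding" mentioned above.
 */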
#endif /* __UM_CACHE_H */