Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

Excerpt of arch/x86/kernel/cpu/cacheinfo.c; every line blames to commit 8f3ce5b39 (kx, 2023-10-28 12:00:06 +0300).

// SPDX-License-Identifier: GPL-2.0
/*
 *	Routines to identify caches on Intel CPU.
 *
 *	Changes:
 *	Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 *	Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 *	Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
 */

#include <linux/slab.h>
#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/sysfs.h>
#include <linux/pci.h>

#include <asm/cpufeature.h>
#include <asm/cacheinfo.h>
#include <asm/amd_nb.h>
#include <asm/smp.h>

#include "cpu.h"

#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

struct _cache_table {
	unsigned char descriptor;
	char cache_type;
	short size;
};

#define MB(x)	((x) * 1024)
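/* Table sizes are kept in KB, so MB(x) converts megabytes to KB. */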

/* All the cache descriptor types we care about (no TLB or
   trace cache entries) */

static const struct _cache_table cache_table[] =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2-way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0e, LVL_1_DATA, 24 },	/* 6-way set assoc, 64 byte line size */
	{ 0x21, LVL_2,      256 },	/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3,      512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3,      MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3,      MB(2) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3,      MB(4) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2,      128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2,      192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2,      128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2,      256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2,      384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2,      512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2,      256 },	/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2,      128 },	/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2,      256 },	/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2,      512 },	/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2,      MB(1) },	/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2,      MB(2) },	/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3,      MB(4) },	/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3,      MB(8) },	/* 8-way set assoc, 64 byte line size */
	{ 0x48, LVL_2,      MB(3) },	/* 12-way set assoc, 64 byte line size */
	{ 0x49, LVL_3,      MB(4) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3,      MB(6) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3,      MB(8) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3,      MB(12) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3,      MB(16) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2,      MB(6) },	/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE,  12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE,  16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE,  32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE,  64 },	/* 8-way set assoc */
	{ 0x78, LVL_2,      MB(1) },	/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2,      128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2,      256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2,      512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2,      MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2,      MB(2) },	/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2,      512 },	/* 2-way set assoc, 64 byte line size */
	{ 0x80, LVL_2,      512 },	/* 8-way set assoc, 64 byte line size */
	{ 0x82, LVL_2,      256 },	/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2,      512 },	/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2,      MB(1) },	/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2,      MB(2) },	/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2,      512 },	/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2,      MB(1) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3,      512 },	/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3,      MB(1) },	/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3,      MB(2) },	/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3,      MB(1) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3,      MB(2) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3,      MB(4) },	/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3,      MB(2) },	/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3,      MB(4) },	/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3,      MB(8) },	/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3,      MB(2) },	/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3,      MB(4) },	/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3,      MB(8) },	/* 16-way set assoc, 64 byte line size */
	{ 0xea, LVL_3,      MB(12) },	/* 24-way set assoc, 64 byte line size */
	{ 0xeb, LVL_3,      MB(18) },	/* 24-way set assoc, 64 byte line size */
	{ 0xec, LVL_3,      MB(24) },	/* 24-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}
};

enum _cache_type {
	CTYPE_NULL = 0,
	CTYPE_DATA = 1,
	CTYPE_INST = 2,
	CTYPE_UNIFIED = 3
};

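/*
 * The unions below mirror the EAX/EBX/ECX output of CPUID leaf 4 (and
 * AMD's equivalent leaf 0x8000001d); the EAX type field uses the same
 * encoding as enum _cache_type above.
 */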
union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};

union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};

union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};

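/* Raw leaf-4 style registers plus values derived from them. */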
struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned int id;
	unsigned long size;
	struct amd_northbridge *nb;
};

static unsigned short num_cache_leaves;

/*
 * AMD doesn't have CPUID4. Emulate it here to report the same
 * information to the user. This makes some assumptions about the
 * machine: L2 not shared, no SMT etc., which are currently true on
 * AMD CPUs.
 *
 * In theory the TLBs could be reported as a fake type (they are in
 * "dummy"). Maybe later.
 */
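/*
 * union l1_cache matches ECX (L1D) / EDX (L1I) of CPUID 0x80000005;
 * l2_cache and l3_cache match ECX (L2) / EDX (L3) of CPUID 0x80000006.
 */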
union l1_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:8;
		unsigned assoc:8;
		unsigned size_in_kb:8;
	};
	unsigned val;
};

union l2_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned size_in_kb:16;
	};
	unsigned val;
};

union l3_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned res:2;
		unsigned size_encoded:14;
	};
	unsigned val;
};

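/*
 * AMD's 4-bit associativity encoding from CPUID 0x80000006; encodings
 * not listed below decode to 0 here.
 */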
static const unsigned short assocs[] = {
	[1] = 1,
	[2] = 2,
	[4] = 4,
	[6] = 8,
	[8] = 16,
	[0xa] = 32,
	[0xb] = 48,
	[0xc] = 64,
	[0xd] = 96,
	[0xe] = 128,
	[0xf] = 0xffff /* fully associative - no way to show this currently */
};

static const unsigned char levels[] = { 1, 1, 2, 3 };
static const unsigned char types[] = { 1, 2, 3, 3 };
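/* levels[]/types[] are indexed by amd_cpuid4()'s leaf argument: 0 = L1D, 1 = L1I, 2 = L2, 3 = L3. */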

static const enum cache_type cache_type_map[] = {
	[CTYPE_NULL] = CACHE_TYPE_NOCACHE,
	[CTYPE_DATA] = CACHE_TYPE_DATA,
	[CTYPE_INST] = CACHE_TYPE_INST,
	[CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED,
};

static void
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		     union _cpuid4_leaf_ebx *ebx,
		     union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
		fallthrough;
	case 0:
		if (!l1->val)
			return;
		assoc = assocs[l1->assoc];
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = assocs[l2.assoc];
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = assocs[l3.assoc];
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
			size_in_kb = size_in_kb >> 1;
			assoc = assoc >> 1;
		}
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;

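	/*
	 * CPUID(4) convention: line size, ways and partitions are
	 * reported minus one; sets = size / (line_size * ways) - 1.
	 */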
	if (assoc == 0xffff)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assoc - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}

#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)

/*
 * L3 cache descriptors
 */
static void amd_calc_l3_indices(struct amd_northbridge *nb)
{
	struct amd_l3_cache *l3 = &nb->l3_cache;
	unsigned int sc0, sc1, sc2, sc3;
	u32 val = 0;

	pci_read_config_dword(nb->misc, 0x1C4, &val);

	/* calculate subcache sizes */
	l3->subcaches[0] = sc0 = !(val & BIT(0));
	l3->subcaches[1] = sc1 = !(val & BIT(4));

	if (boot_cpu_data.x86 == 0x15) {
		l3->subcaches[0] = sc0 += !(val & BIT(1));
		l3->subcaches[1] = sc1 += !(val & BIT(5));
	}

	l3->subcaches[2] = sc2 = !(val & BIT(8))  + !(val & BIT(9));
	l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));

	l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
}

/*
 * check whether a slot used for disabling an L3 index is occupied.
 * @nb:   AMD northbridge that owns the L3 cache
 * @slot: slot number (0..1)
 *
 * @returns: the disabled index if used, or a negative value if the
 *           slot is free.
 */
static int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
{
	unsigned int reg = 0;

	pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);

	/* check whether this slot is activated already */
	if (reg & (3UL << 30))
		return reg & 0xfff;

	return -1;
}

static ssize_t show_cache_disable(struct cacheinfo *this_leaf, char *buf,
				  unsigned int slot)
{
	int index;
	struct amd_northbridge *nb = this_leaf->priv;

	index = amd_get_l3_disable_slot(nb, slot);
	if (index >= 0)
		return sprintf(buf, "%d\n", index);

	return sprintf(buf, "FREE\n");
}

#define SHOW_CACHE_DISABLE(slot)					\
static ssize_t								\
cache_disable_##slot##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);		\
	return show_cache_disable(this_leaf, buf, slot);		\
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)

static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
				 unsigned slot, unsigned long idx)
{
	int i;

	idx |= BIT(30);

	/*
	 * disable index in all 4 subcaches
	 */
	for (i = 0; i < 4; i++) {
		u32 reg = idx | (i << 20);

		if (!nb->l3_cache.subcaches[i])
			continue;

		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);

		/*
		 * We need to WBINVD on a core on the node containing the L3
		 * cache whose indices we are disabling, so a simple wbinvd()
		 * is not sufficient.
		 */
		wbinvd_on_cpu(cpu);

		reg |= BIT(31);
		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
	}
}

/*
 * disable a L3 cache index by using a disable-slot
 *
 * @nb:    AMD northbridge that owns the L3 cache
 * @cpu:   A CPU on the node containing the L3 cache
 * @slot:  slot number (0..1)
 * @index: index to disable
 *
 * @return: 0 on success, error status on failure
 */
static int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu,
			    unsigned slot, unsigned long index)
{
	int ret = 0;

	/*  check if @slot is already used or the index is already disabled */
	ret = amd_get_l3_disable_slot(nb, slot);
	if (ret >= 0)
		return -EEXIST;

	if (index > nb->l3_cache.indices)
		return -EINVAL;

	/* check whether the other slot has disabled the same index already */
	if (index == amd_get_l3_disable_slot(nb, !slot))
		return -EEXIST;

	amd_l3_disable_index(nb, cpu, slot, index);

	return 0;
}

static ssize_t store_cache_disable(struct cacheinfo *this_leaf,
				   const char *buf, size_t count,
				   unsigned int slot)
{
	unsigned long val = 0;
	int cpu, err = 0;
	struct amd_northbridge *nb = this_leaf->priv;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	cpu = cpumask_first(&this_leaf->shared_cpu_map);

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	err = amd_set_l3_disable_slot(nb, cpu, slot, val);
	if (err) {
		if (err == -EEXIST)
			pr_warn("L3 slot %d in use/index already disabled!\n",
				   slot);
		return err;
	}
	return count;
}
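
/*
 * Example usage (sysfs paths may vary; index3 is typically the L3 leaf):
 *   # echo 42 > /sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0
 * disables L3 index 42 via slot 0; reading the file back returns the
 * disabled index, or "FREE" when the slot is unused.
 */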

#define STORE_CACHE_DISABLE(slot)					\
static ssize_t								\
cache_disable_##slot##_store(struct device *dev,			\
			     struct device_attribute *attr,		\
			     const char *buf, size_t count)		\
{									\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);		\
	return store_cache_disable(this_leaf, buf, count, slot);	\
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

static ssize_t subcaches_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	int cpu = cpumask_first(&this_leaf->shared_cpu_map);

	return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
}

static ssize_t subcaches_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	int cpu = cpumask_first(&this_leaf->shared_cpu_map);
	unsigned long val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoul(buf, 16, &val) < 0)
		return -EINVAL;

	if (amd_set_subcaches(cpu, val))
		return -EINVAL;

	return count;
}

static DEVICE_ATTR_RW(cache_disable_0);
static DEVICE_ATTR_RW(cache_disable_1);
static DEVICE_ATTR_RW(subcaches);
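/* DEVICE_ATTR_RW(x) binds the x_show()/x_store() handlers defined above. */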

static umode_t
cache_private_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	umode_t mode = attr->mode;

	if (!this_leaf->priv)
		return 0;

	if ((attr == &dev_attr_subcaches.attr) &&
	    amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return mode;

	if ((attr == &dev_attr_cache_disable_0.attr ||
	     attr == &dev_attr_cache_disable_1.attr) &&
	    amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		return mode;

	return 0;
}

static struct attribute_group cache_private_group = {
	.is_visible = cache_private_attrs_is_visible,
};

static void init_amd_l3_attrs(void)
{
	int n = 1;
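	/* n starts at 1 so the zeroed kcalloc() array keeps a NULL sentinel. */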
	static struct attribute **amd_l3_attrs;

	if (amd_l3_attrs) /* already initialized */
		return;

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		n += 2;
	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		n += 1;

	amd_l3_attrs = kcalloc(n, sizeof(*amd_l3_attrs), GFP_KERNEL);
	if (!amd_l3_attrs)
		return;

	n = 0;
	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
		amd_l3_attrs[n++] = &dev_attr_cache_disable_0.attr;
		amd_l3_attrs[n++] = &dev_attr_cache_disable_1.attr;
	}
	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		amd_l3_attrs[n++] = &dev_attr_subcaches.attr;

	cache_private_group.attrs = amd_l3_attrs;
}

const struct attribute_group *
cache_get_priv_group(struct cacheinfo *this_leaf)
{
	struct amd_northbridge *nb = this_leaf->priv;

	if (this_leaf->level < 3 || !nb)
		return NULL;

	if (nb && nb->l3_cache.indices)
		init_amd_l3_attrs();

	return &cache_private_group;
}

static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
{
	int node;

	/* only for L3, and not in virtualized environments */
	if (index < 3)
		return;

	node = amd_get_nb_id(smp_processor_id());
	this_leaf->nb = node_to_amd_nb(node);
	if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
		amd_calc_l3_indices(this_leaf->nb);
}
#else
#define amd_init_l3_cache(x, y)
#endif  /* CONFIG_AMD_NB && CONFIG_SYSFS */

static int
cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
{
	union _cpuid4_leaf_eax	eax;
	union _cpuid4_leaf_ebx	ebx;
	union _cpuid4_leaf_ecx	ecx;
	unsigned		edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		if (boot_cpu_has(X86_FEATURE_TOPOEXT))
			cpuid_count(0x8000001d, index, &eax.full,
				    &ebx.full, &ecx.full, &edx);
		else
			amd_cpuid4(index, &eax, &ebx, &ecx);
		amd_init_l3_cache(this_leaf, index);
	} else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		cpuid_count(0x8000001d, index, &eax.full,
			    &ebx.full, &ecx.full, &edx);
		amd_init_l3_cache(this_leaf, index);
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}

	if (eax.split.type == CTYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
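	/* Total size in bytes = sets * line size * physical partitions * ways. */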
	this_leaf->size = (ecx.split.number_of_sets          + 1) *
			  (ebx.split.coherency_line_size     + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity   + 1);
	return 0;
}

static int find_num_cache_leaves(struct cpuinfo_x86 *c)
{
	unsigned int		eax, ebx, ecx, edx, op;
	union _cpuid4_leaf_eax	cache_eax;
	int			i = -1;

	if (c->x86_vendor == X86_VENDOR_AMD ||
	    c->x86_vendor == X86_VENDOR_HYGON)
		op = 0x8000001d;
	else
		op = 4;
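	/* Leaf 0x8000001d is AMD/Hygon's equivalent of Intel's leaf 4. */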

	do {
		++i;
		/* Do cpuid(op) loop to find out num_cache_leaves */
		cpuid_count(op, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CTYPE_NULL);
	return i;
}

void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu)
{
	/*
	 * We may have multiple LLCs if L3 caches exist, so check if we
	 * have an L3 cache by looking at the L3 cache CPUID leaf.
	 */
	if (!cpuid_edx(0x80000006))
		return;

	if (c->x86 < 0x17) {
		/* LLC is at the node level. */
		per_cpu(cpu_llc_id, cpu) = c->cpu_die_id;
	} else if (c->x86 == 0x17 && c->x86_model <= 0x1F) {
		/*
		 * LLC is at the core complex level.
		 * Core complex ID is ApicId[3] for these processors.
		 */
		per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
	} else {
		/*
		 * LLC ID is calculated from the number of threads sharing the
		 * cache.
		 */
		u32 eax, ebx, ecx, edx, num_sharing_cache = 0;
		u32 llc_index = find_num_cache_leaves(c) - 1;

		cpuid_count(0x8000001d, llc_index, &eax, &ebx, &ecx, &edx);
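		/* EAX[25:14] of leaf 0x8000001d = threads sharing this cache - 1. */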
		if (eax)
			num_sharing_cache = ((eax >> 14) & 0xfff) + 1;

		if (num_sharing_cache) {
			int bits = get_count_order(num_sharing_cache);

			per_cpu(cpu_llc_id, cpu) = c->apicid >> bits;
		}
	}
}

void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu)
{
	/*
	 * We may have multiple LLCs if L3 caches exist, so check if we
	 * have an L3 cache by looking at the L3 cache CPUID leaf.
	 */
	if (!cpuid_edx(0x80000006))
		return;

	/*
	 * LLC is at the core complex level.
	 * Core complex ID is ApicId[3] for these processors.
	 */
	per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
}

void init_amd_cacheinfo(struct cpuinfo_x86 *c)
{
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		num_cache_leaves = find_num_cache_leaves(c);
	} else if (c->extended_cpuid_level >= 0x80000006) {
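		/* EDX[15:12] of CPUID 0x80000006 is nonzero when an L3 exists. */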
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 		if (cpuid_edx(0x80000006) & 0xf000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 			num_cache_leaves = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 			num_cache_leaves = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) void init_hygon_cacheinfo(struct cpuinfo_x86 *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	num_cache_leaves = find_num_cache_leaves(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) void init_intel_cacheinfo(struct cpuinfo_x86 *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	/* Cache sizes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	unsigned int cpu = c->cpu_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	if (c->cpuid_level > 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 		static int is_initialized;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 		if (is_initialized == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 			/* Init num_cache_leaves from boot CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 			num_cache_leaves = find_num_cache_leaves(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 			is_initialized++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 		 * Whenever possible use cpuid(4), deterministic cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 		 * parameters cpuid leaf to find the cache details
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf = {};
			int retval;

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
			if (retval < 0)
				continue;

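			/*
			 * Record each level's size in KB. For L2/L3 also
			 * derive a cache id by clearing the low APIC-id bits
			 * that enumerate the sharing threads: e.g. if
			 * EAX[25:14] reports 3 (four threads share), then
			 * index_msb = 2 and the id is apicid & ~3, identical
			 * for all four threads.
			 */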
			switch (this_leaf.eax.split.level) {
			case 1:
				if (this_leaf.eax.split.type == CTYPE_DATA)
					new_l1d = this_leaf.size/1024;
				else if (this_leaf.eax.split.type == CTYPE_INST)
					new_l1i = this_leaf.size/1024;
				break;
			case 2:
				new_l2 = this_leaf.size/1024;
				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
				index_msb = get_count_order(num_threads_sharing);
				l2_id = c->apicid & ~((1 << index_msb) - 1);
				break;
			case 3:
				new_l3 = this_leaf.size/1024;
				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
				index_msb = get_count_order(num_threads_sharing);
				l3_id = c->apicid & ~((1 << index_msb) - 1);
				break;
			default:
				break;
			}
		}
	}
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we still use
	 * cpuid2 for the trace cache.
	 */
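	/*
	 * Legacy format, for reference: cpuid(2) packs up to 15 one-byte
	 * descriptors into EAX..EDX, each naming a fixed cache or TLB
	 * configuration that is looked up in cache_table[] above (e.g.
	 * descriptor 0x2c is a 32 KB L1 data cache). The low byte of EAX is
	 * the number of times the leaf must be queried to retrieve all
	 * descriptors.
	 */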
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports the cpuid(2) call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

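		/*
		 * On CPUs documented so far the count in AL is 1, so this
		 * loop normally executes exactly once.
		 */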
		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/*
			 * If bit 31 is set, the register contains no valid
			 * descriptors (the rule applies to all four registers).
			 */
			for (j = 0 ; j < 4 ; j++)
				if (regs[j] & (1 << 31))
					regs[j] = 0;

			/* Byte 0 of EAX is the repeat count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}

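	/*
	 * The deterministic cpuid(4) numbers, when present, override the
	 * descriptor sums accumulated above.
	 */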
	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_SMP
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_SMP
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}

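	/*
	 * When both levels are present, the L3 assignment above wins, so
	 * cpu_llc_id ends up naming the last-level cache domain.
	 */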
#ifdef CONFIG_SMP
	/*
	 * If cpu_llc_id is not yet set, this means cpuid_level < 4, which in
	 * turn means that the only possibility is SMT (as indicated in
	 * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
	 * that SMT shares all caches, we can unconditionally set cpu_llc_id to
	 * c->phys_proc_id.
	 */
	if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
		per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
#endif

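	/* Report the largest cache level present, in KB: L3, else L2, else L1d+L1i. */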
	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	if (!l2)
		cpu_detect_cache_sizes(c);
}

static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
				    struct _cpuid4_info_regs *base)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf;
	int i, sibling;

	/*
	 * For L3, always use the pre-calculated cpu_llc_shared_mask
	 * to derive shared_cpu_map.
	 */
	if (index == 3) {
		for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
			this_cpu_ci = get_cpu_cacheinfo(i);
			if (!this_cpu_ci->info_list)
				continue;
			this_leaf = this_cpu_ci->info_list + index;
			for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
				if (!cpu_online(sibling))
					continue;
				cpumask_set_cpu(sibling,
						&this_leaf->shared_cpu_map);
			}
		}
	} else if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		unsigned int apicid, nshared, first, last;

		nshared = base->eax.split.num_threads_sharing + 1;
		apicid = cpu_data(cpu).apicid;
		first = apicid - (apicid % nshared);
		last = first + nshared - 1;

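		/*
		 * Threads sharing this cache occupy a contiguous APIC-id
		 * window: e.g. with nshared = 4 and apicid = 5, first = 4
		 * and last = 7.
		 */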
		for_each_online_cpu(i) {
			this_cpu_ci = get_cpu_cacheinfo(i);
			if (!this_cpu_ci->info_list)
				continue;

			apicid = cpu_data(i).apicid;
			if ((apicid < first) || (apicid > last))
				continue;

			this_leaf = this_cpu_ci->info_list + index;

			for_each_online_cpu(sibling) {
				apicid = cpu_data(sibling).apicid;
				if ((apicid < first) || (apicid > last))
					continue;
				cpumask_set_cpu(sibling,
						&this_leaf->shared_cpu_map);
			}
		}
	} else
		return 0;

	return 1;
}

static void __cache_cpumap_setup(unsigned int cpu, int index,
				 struct _cpuid4_info_regs *base)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	if (c->x86_vendor == X86_VENDOR_AMD ||
	    c->x86_vendor == X86_VENDOR_HYGON) {
		if (__cache_amd_cpumap_setup(cpu, index, base))
			return;
	}

	this_leaf = this_cpu_ci->info_list + index;
	num_threads_sharing = 1 + base->eax.split.num_threads_sharing;

	cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
	if (num_threads_sharing == 1)
		return;

	index_msb = get_count_order(num_threads_sharing);

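	/*
	 * CPUs whose APIC ids match in all bits above index_msb share this
	 * cache; link each such sibling's map with ours in both directions.
	 */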
	for_each_online_cpu(i)
		if (cpu_data(i).apicid >> index_msb == c->apicid >> index_msb) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */
			sibling_leaf = sib_cpu_ci->info_list + index;
			cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			cpumask_set_cpu(cpu, &sibling_leaf->shared_cpu_map);
		}
}

static void ci_leaf_init(struct cacheinfo *this_leaf,
			 struct _cpuid4_info_regs *base)
{
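	/*
	 * CPUID.4 stores line size, associativity, partitions and set count
	 * as (value - 1), hence the +1 adjustments below.
	 */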
	this_leaf->id = base->id;
	this_leaf->attributes = CACHE_ID;
	this_leaf->level = base->eax.split.level;
	this_leaf->type = cache_type_map[base->eax.split.type];
	this_leaf->coherency_line_size =
				base->ebx.split.coherency_line_size + 1;
	this_leaf->ways_of_associativity =
				base->ebx.split.ways_of_associativity + 1;
	this_leaf->size = base->size;
	this_leaf->number_of_sets = base->ecx.split.number_of_sets + 1;
	this_leaf->physical_line_partition =
				base->ebx.split.physical_line_partition + 1;
	this_leaf->priv = base->nb;
}

int init_cache_level(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

	if (!num_cache_leaves)
		return -ENOENT;
	if (!this_cpu_ci)
		return -EINVAL;
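	/*
	 * x86 reports at most three cache levels (L1/L2/L3) here, even
	 * though the leaf count may be four when L1 is split into separate
	 * instruction and data caches.
	 */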
	this_cpu_ci->num_levels = 3;
	this_cpu_ci->num_leaves = num_cache_leaves;
	return 0;
}

/*
 * The maximum number of sharing threads comes from CPUID.4:EAX[25:14], with
 * the cache index in ECX as input. Right-shifting the apicid by that
 * number's order yields the cache id for this cache node.
 */
static void get_cache_id(int cpu, struct _cpuid4_info_regs *id4_regs)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	unsigned long num_threads_sharing;
	int index_msb;

	num_threads_sharing = 1 + id4_regs->eax.split.num_threads_sharing;
	index_msb = get_count_order(num_threads_sharing);
	id4_regs->id = c->apicid >> index_msb;
}

int populate_cache_leaves(unsigned int cpu)
{
	unsigned int idx;
	int ret;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
	struct _cpuid4_info_regs id4_regs = {};

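	/*
	 * For each leaf: read the raw registers, derive the cache id from
	 * the APIC id, fill in the generic cacheinfo fields, then build the
	 * shared_cpu_map.
	 */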
	for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
		ret = cpuid4_cache_lookup_regs(idx, &id4_regs);
		if (ret)
			return ret;
		get_cache_id(cpu, &id4_regs);
		ci_leaf_init(this_leaf++, &id4_regs);
		__cache_cpumap_setup(cpu, idx, &id4_regs);
	}
	this_cpu_ci->cpu_map_populated = true;

	return 0;
}