/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
 * Copyright (C) 2001,2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 */

#ifndef _ASM_ACPI_H
#define _ASM_ACPI_H

#ifdef __KERNEL__

#include <acpi/pdc_intel.h>

#include <linux/init.h>
#include <linux/numa.h>
#include <asm/numa.h>

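/*
 * acpi_lapic is expected to be set by the MADT parsing code once local
 * SAPIC (CPU) entries have been found; acpi_has_cpu_in_madt() below
 * simply reports that.
 */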
extern int acpi_lapic;
#define acpi_disabled 0	/* ACPI always enabled on IA64 */
#define acpi_noirq 0	/* ACPI always enabled on IA64 */
#define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
#define acpi_strict 1	/* no ACPI spec workarounds on IA64 */

static inline bool acpi_has_cpu_in_madt(void)
{
	return !!acpi_lapic;
}

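/*
 * ACPI cannot be disabled on IA64 and there are no C-state limits, so
 * these two are pass-through/no-op stubs kept for the generic ACPI code.
 */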
#define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
static inline void disable_acpi(void) { }

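/*
 * Interrupt setup hooks: acpi_request_vector() allocates an IA64
 * interrupt vector for an ACPI platform interrupt source (e.g. the
 * corrected platform error interrupt), and acpi_gsi_to_irq() maps a
 * Global System Interrupt number to a Linux IRQ number. (Descriptive
 * summary only; the implementations live in the IA64 ACPI boot code.)
 */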
int acpi_request_vector(u32 int_type);
int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);

/* Low-level suspend routine. */
extern int acpi_suspend_lowlevel(void);

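/*
 * IA64 does not use a real-mode wakeup vector the way x86 does, so
 * there is no wakeup address to report; returning 0 tells the ACPI
 * core that none is available.
 */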
static inline unsigned long acpi_get_wakeup_address(void)
{
	return 0;
}

/*
 * Record the cpei override flag and current logical cpu. This is
 * useful for CPU removal.
 */
extern unsigned int can_cpei_retarget(void);
extern unsigned int is_cpu_cpei_target(unsigned int cpu);
extern void set_cpei_target_cpu(unsigned int cpu);
extern unsigned int get_cpei_target_cpu(void);
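
/*
 * prefill_possible_map() sets up the possible-CPU map early in boot,
 * leaving room for 'additional_cpus' hot-pluggable CPUs beyond the
 * ones enumerated by ACPI (additional_cpus is a boot parameter when
 * CPU hotplug is configured, otherwise it is forced to 0 below).
 */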
extern void prefill_possible_map(void);
#ifdef CONFIG_ACPI_HOTPLUG_CPU
extern int additional_cpus;
#else
#define additional_cpus 0
#endif

#ifdef CONFIG_ACPI_NUMA
#if MAX_NUMNODES > 256
#define MAX_PXM_DOMAINS MAX_NUMNODES
#else
#define MAX_PXM_DOMAINS (256)
#endif
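/*
 * Translation tables between ACPI proximity domains (PXM values from
 * the SRAT) and Linux NUMA node ids, filled in during early boot.
 */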
extern int pxm_to_nid_map[MAX_PXM_DOMAINS];
extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
#endif

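/*
 * _PDC (Processor Driver Capabilities) handling: IA64 always evaluates
 * _PDC, and advertises the SMP Enhanced SpeedStep capability in the
 * third dword of the capability buffer handed to the firmware.
 */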
static inline bool arch_has_acpi_pdc(void) { return true; }
static inline void arch_acpi_set_pdc_bits(u32 *buf)
{
	buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
}

#ifdef CONFIG_ACPI_NUMA
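/*
 * early_cpu_possible_map collects the CPUs discovered early from the
 * ACPI MADT, before the regular possible-CPU map has been set up.
 * Usage sketch (setup_early_cpu() is purely illustrative):
 *
 *	int cpu;
 *
 *	for_each_possible_early_cpu(cpu)
 *		setup_early_cpu(cpu);
 */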
extern cpumask_t early_cpu_possible_map;
#define for_each_possible_early_cpu(cpu)	\
	for_each_cpu((cpu), &early_cpu_possible_map)

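/*
 * Pad early_cpu_possible_map so that at least min_cpus entries exist,
 * plus reserve_cpus extra slots for hot-pluggable CPUs, and give each
 * padded (not yet present) CPU a home node by distributing them
 * round-robin over the currently online nodes.
 */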
static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
{
	int low_cpu, high_cpu;
	int cpu;
	int next_nid = 0;

	low_cpu = cpumask_weight(&early_cpu_possible_map);

	high_cpu = max(low_cpu, min_cpus);
	high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);

	for (cpu = low_cpu; cpu < high_cpu; cpu++) {
		cpumask_set_cpu(cpu, &early_cpu_possible_map);
		if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
			node_cpuid[cpu].nid = next_nid;
			next_nid++;
			if (next_nid >= num_online_nodes())
				next_nid = 0;
		}
	}
}

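/* Final ACPI/NUMA fixups, run once the SRAT information has been parsed. */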
extern void acpi_numa_fixup(void);

#endif /* CONFIG_ACPI_NUMA */

#endif /*__KERNEL__*/

#endif /*_ASM_ACPI_H*/