Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 */

#define pr_fmt(fmt)     "AMD-Vi: " fmt
#define dev_fmt(fmt)    pr_fmt(fmt)

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/mem_encrypt.h>
#include <linux/iopoll.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/apic.h>
#include <asm/msidef.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>
#include <asm/set_memory.h>

#include <linux/crash_dump.h>

#include "amd_iommu.h"
#include "../irq_remapping.h"

/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE_MAX_SUPPORTED	0x40
#define ACPI_IVMD_TYPE_ALL              0x20
#define ACPI_IVMD_TYPE                  0x21
#define ACPI_IVMD_TYPE_RANGE            0x22

#define IVHD_DEV_ALL                    0x01
#define IVHD_DEV_SELECT                 0x02
#define IVHD_DEV_SELECT_RANGE_START     0x03
#define IVHD_DEV_RANGE_END              0x04
#define IVHD_DEV_ALIAS                  0x42
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47
#define IVHD_DEV_SPECIAL		0x48
#define IVHD_DEV_ACPI_HID		0xf0

#define UID_NOT_PRESENT                 0
#define UID_IS_INTEGER                  1
#define UID_IS_CHARACTER                2

#define IVHD_SPECIAL_IOAPIC		1
#define IVHD_SPECIAL_HPET		2

#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
#define IVHD_FLAG_PASSPW_EN_MASK        0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
#define IVHD_FLAG_ISOC_EN_MASK          0x08

#define IVMD_FLAG_EXCL_RANGE            0x08
#define IVMD_FLAG_IW                    0x04
#define IVMD_FLAG_IR                    0x02
#define IVMD_FLAG_UNITY_MAP             0x01

#define ACPI_DEVFLAG_INITPASS           0x01
#define ACPI_DEVFLAG_EXTINT             0x02
#define ACPI_DEVFLAG_NMI                0x04
#define ACPI_DEVFLAG_SYSMGT1            0x10
#define ACPI_DEVFLAG_SYSMGT2            0x20
#define ACPI_DEVFLAG_LINT0              0x40
#define ACPI_DEVFLAG_LINT1              0x80
#define ACPI_DEVFLAG_ATSDIS             0x10000000

#define LOOP_TIMEOUT	100000
/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

extern const struct iommu_ops amd_iommu_ops;

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entry structures.
 */
struct ivhd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 cap_ptr;
	u64 mmio_phys;
	u16 pci_seg;
	u16 info;
	u32 efr_attr;

	/* Following only valid on IVHD type 11h and 40h */
	u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
	u64 res;
} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
	u8 type;
	u16 devid;
	u8 flags;
	u32 ext;
	u32 hidh;
	u64 cid;
	u8 uidf;
	u8 uidl;
	u8 uid;
} __attribute__((packed));

/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 aux;
	u64 resv;
	u64 range_start;
	u64 range_length;
} __attribute__((packed));

bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;

int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;

static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;
static int amd_iommu_target_ivhd_type;

u16 amd_iommu_last_bdf;			/* largest PCI device id we have
					   to handle */
LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
					   we find in ACPI */
bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
					   system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];

/* Number of IOMMUs present in the system */
static int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasid __read_mostly = ~0;

bool amd_iommu_v2_present __read_mostly;
static bool amd_iommu_pc_present __read_mostly;

bool amd_iommu_force_isolation __read_mostly;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs.
 * It is indexed by the PCI device id or the HT unit id and contains
 * information about the domain the device belongs to as well as the
 * page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;
/*
 * Pointer to a device table to which the content of the old device table
 * will be copied. It is only used in the kdump kernel.
 */
static struct dev_table_entry *old_dev_tbl_cpy;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;
EXPORT_SYMBOL(amd_iommu_rlookup_table);

/*
 * This table is used to find the irq remapping table for a given device id
 * quickly.
 */
struct irq_remap_table **irq_lookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;	/* size of the device table */
static u32 alias_table_size;	/* size of the alias table */
static u32 rlookup_table_size;	/* size of the rlookup table */

enum iommu_init_state {
	IOMMU_START_STATE,
	IOMMU_IVRS_DETECTED,
	IOMMU_ACPI_FINISHED,
	IOMMU_ENABLED,
	IOMMU_PCI_INIT,
	IOMMU_INTERRUPTS_EN,
	IOMMU_DMA_OPS,
	IOMMU_INITIALIZED,
	IOMMU_NOT_FOUND,
	IOMMU_INIT_ERROR,
	IOMMU_CMDLINE_DISABLED,
};

/* Early ioapic and hpet maps from kernel command line */
#define EARLY_MAP_SIZE		4
static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];

static int __initdata early_ioapic_map_size;
static int __initdata early_hpet_map_size;
static int __initdata early_acpihid_map_size;

static bool __initdata cmdline_maps;

static enum iommu_init_state init_state = IOMMU_START_STATE;

static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);

static bool amd_iommu_pre_enabled = true;

static u32 amd_iommu_ivinfo __initdata;

bool translation_pre_enabled(struct amd_iommu *iommu)
{
	return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
}
EXPORT_SYMBOL(translation_pre_enabled);

static void clear_translation_pre_enabled(struct amd_iommu *iommu)
{
	iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct amd_iommu *iommu)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	if (ctrl & (1<<CONTROL_IOMMU_EN))
		iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static inline void update_last_devid(u16 devid)
{
	if (devid > amd_iommu_last_bdf)
		amd_iommu_last_bdf = devid;
}

static inline unsigned long tbl_size(int entry_size)
{
	unsigned shift = PAGE_SHIFT +
			 get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

	return 1UL << shift;
}
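
/*
 * Illustrative note (not in the original source), assuming the 32-byte
 * struct dev_table_entry: with the maximum amd_iommu_last_bdf of 0xffff,
 * (0xffff + 1) * 32 = 2 MiB, get_order() of that is 9, and tbl_size()
 * returns 1UL << (PAGE_SHIFT + 9) = 2 MiB, i.e. the table size rounded
 * up to a page-aligned power of two.
 */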

int amd_iommu_get_num_iommus(void)
{
	return amd_iommus_present;
}

#ifdef CONFIG_IRQ_REMAP
static bool check_feature_on_all_iommus(u64 mask)
{
	bool ret = false;
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		ret = iommu_feature(iommu, mask);
		if (!ret)
			return false;
	}

	return true;
}
#endif

/*
 * For IVHD type 0x11/0x40, EFR is also available via IVHD.
 * Default to IVHD EFR since it is available sooner
 * (i.e. before PCI init).
 */
static void __init early_iommu_features_init(struct amd_iommu *iommu,
					     struct ivhd_header *h)
{
	if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP)
		iommu->features = h->efr_reg;
}

/* Access to l1 and l2 indexed register spaces */

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(iommu->dev, 0xfc, &val);
	return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
	pci_write_config_dword(iommu->dev, 0xfc, val);
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf0, address);
	pci_read_config_dword(iommu->dev, 0xf4, &val);
	return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
	pci_write_config_dword(iommu->dev, 0xf4, val);
}
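
/*
 * Summary note (not in the original source): these helpers use an
 * indirect address/data pair in PCI config space. For L1, offset 0xf8
 * selects the target register (the L1 bank in bits 16+, with bit 31
 * acting as a write-enable) and offset 0xfc carries the data; for L2,
 * offset 0xf0 selects the register (bit 8 as write-enable) and offset
 * 0xf4 carries the data.
 */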

/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required by this driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated.
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
	u64 entry;

	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
			&entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
			&entry, sizeof(entry));
}

static void iommu_set_cwwb_range(struct amd_iommu *iommu)
{
	u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
	u64 entry = start & PM_ADDR_MASK;

	if (!iommu_feature(iommu, FEATURE_SNP))
		return;

	/* Note:
	 * Re-purpose Exclusion base/limit registers for Completion wait
	 * write-back base/limit.
	 */
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
		    &entry, sizeof(entry));

	/* Note:
	 * Default to 4 Kbytes, which can be specified by setting base
	 * address equal to the limit address.
	 */
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
		    &entry, sizeof(entry));
}

/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->mmio_base == NULL);

	entry = iommu_virt_to_phys(amd_iommu_dev_table);
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
			&entry, sizeof(entry));
}
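
/*
 * Illustrative note (not in the original source): the low bits of the
 * device table base register encode the table size as a count of 4 KiB
 * pages minus one. For example, a 2 MiB table gives
 * (dev_table_size >> 12) - 1 = 511, OR'd into the physical base address.
 */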

/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~CTRL_INV_TO_MASK;
	ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
	if (!iommu->mmio_base)
		return;

	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	/* Disable event logging and event interrupts */
	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

	/* Disable IOMMU GA_LOG */
	iommu_feature_disable(iommu, CONTROL_GALOG_EN);
	iommu_feature_disable(iommu, CONTROL_GAINT_EN);

	/* Disable IOMMU hardware itself */
	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
{
	if (!request_mem_region(address, end, "amd_iommu")) {
		pr_err("Can not reserve memory region %llx-%llx for mmio\n",
			address, end);
		pr_err("This is a BIOS bug. Please contact your hardware vendor\n");
		return NULL;
	}

	return (u8 __iomem *)ioremap(address, end);
}
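
/*
 * Note (not in the original source): despite its name, the 'end'
 * argument is used as a region length here; both request_mem_region()
 * and ioremap() take a size, not an end address.
 */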

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
}

static inline u32 get_ivhd_header_size(struct ivhd_header *h)
{
	u32 size = 0;

	switch (h->type) {
	case 0x10:
		size = 24;
		break;
	case 0x11:
	case 0x40:
		size = 40;
		break;
	}
	return size;
}
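
/*
 * Note (not in the original source): the sizes match struct ivhd_header
 * above. A legacy type 0x10 header is 24 bytes and stops after efr_attr;
 * type 0x11/0x40 headers add the efr_reg and res fields for 40 bytes,
 * as flagged by the "only valid on IVHD type 11h and 40h" comment.
 */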

/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Based on this information the sizes of the shared
 * data structures are determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
 */
static inline int ivhd_entry_length(u8 *ivhd)
{
	u32 type = ((struct ivhd_entry *)ivhd)->type;

	if (type < 0x80) {
		return 0x04 << (*ivhd >> 6);
	} else if (type == IVHD_DEV_ACPI_HID) {
		/* For ACPI_HID, offset 21 is uid len */
		return *((u8 *)ivhd + 21) + 22;
	}
	return 0;
}
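
/*
 * Illustrative note (not in the original source): for types below 0x80
 * the top two bits of the type byte select the entry size, e.g.
 * IVHD_DEV_SELECT (0x02) gives 0x04 << 0 = 4 bytes and IVHD_DEV_SPECIAL
 * (0x48) gives 0x04 << 1 = 8 bytes. IVHD_DEV_ACPI_HID entries are
 * variable length: a 22-byte fixed part plus the uid length at offset 21.
 */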

/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function checks whether a higher device id is defined in the ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;

	u32 ivhd_size = get_ivhd_header_size(h);

	if (!ivhd_size) {
		pr_err("Unsupported IVHD type %#x\n", h->type);
		return -EINVAL;
	}

	p += ivhd_size;
	end += h->length;

	while (p < end) {
		dev = (struct ivhd_entry *)p;
		switch (dev->type) {
		case IVHD_DEV_ALL:
			/* Use maximum BDF value for DEV_ALL */
			update_last_devid(0xffff);
			break;
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_ALIAS:
		case IVHD_DEV_EXT_SELECT:
			/* all the above subfield types refer to device ids */
			update_last_devid(dev->devid);
			break;
		default:
			break;
		}
		p += ivhd_entry_length(p);
	}

	WARN_ON(p != end);

	return 0;
}

static int __init check_ivrs_checksum(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table;

	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0) {
		/* ACPI table corrupt */
		pr_err(FW_BUG "IVRS invalid checksum\n");
		return -ENODEV;
	}

	return 0;
}
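
/*
 * Note (not in the original source): per the ACPI specification, all
 * bytes of a table, including its checksum byte, must sum to zero
 * modulo 256, so the wrap-around of the u8 accumulator is intentional
 * and any non-zero result indicates a corrupt table.
 */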

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table; the table checksum is verified beforehand via
 * check_ivrs_checksum().
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	p += IVRS_HEADER_LENGTH;

	end += table->length;
	while (p < end) {
		h = (struct ivhd_header *)p;
		if (h->type == amd_iommu_target_ivhd_type) {
			int ret = find_last_devid_from_ivhd(h);

			if (ret)
				return ret;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}

/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup table and also
 * basically initialize the hardware.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously.
 */
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
	iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(CMD_BUFFER_SIZE));

	return iommu->cmd_buf ? 0 : -ENOMEM;
}

/*
 * This function restarts event logging in case the IOMMU experienced
 * an event log buffer overflow.
 */
void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	iommu->cmd_buf_head = 0;
	iommu->cmd_buf_tail = 0;

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}
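
/*
 * Note (not in the original source): the command buffer is disabled
 * while the head and tail pointers are cleared so the IOMMU does not
 * fetch from a half-updated ring; re-enabling resumes fetching from
 * entry 0.
 */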

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	entry = iommu_virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);
}
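
/*
 * Illustrative note (not in the original source): MMIO_CMD_SIZE_512
 * encodes the ring size in the upper bits of the base register,
 * declaring a 512-entry command buffer; assuming the usual 16-byte
 * command format, that corresponds to an 8 KiB CMD_BUFFER_SIZE
 * allocation.
 */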

/*
 * This function disables the command buffer
 */
static void iommu_disable_command_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}

static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
					 gfp_t gfp, size_t size)
{
	int order = get_order(size);
	void *buf = (void *)__get_free_pages(gfp, order);

	if (buf &&
	    iommu_feature(iommu, FEATURE_SNP) &&
	    set_memory_4k((unsigned long)buf, (1 << order))) {
		free_pages((unsigned long)buf, order);
		buf = NULL;
	}

	return buf;
}
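
/*
 * Note (not in the original source): when the IOMMU advertises
 * FEATURE_SNP, buffers handed to the hardware are forced to 4 KiB
 * mappings in the kernel direct map via set_memory_4k(); if that split
 * fails, the allocation is released and NULL is returned rather than
 * handing the IOMMU a large-page-mapped buffer.
 */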

/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
					      EVT_BUFFER_SIZE);

	return iommu->evt_buf ? 0 : -ENOMEM;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

	entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

/*
 * This function disables the event log buffer
 */
static void iommu_disable_event_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) /* allocates the memory where the IOMMU will log its events to */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) static int __init alloc_ppr_log(struct amd_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 					      PPR_LOG_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	return iommu->ppr_log ? 0 : -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 
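^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783)  * Programs the PPR log base address into the IOMMU MMIO space, resets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784)  * the log head/tail pointers and enables PPR logging. Does nothing if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785)  * no PPR log was allocated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786)  */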
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) static void iommu_enable_ppr_log(struct amd_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	u64 entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	if (iommu->ppr_log == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		    &entry, sizeof(entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	/* set head and tail to zero manually */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	iommu_feature_enable(iommu, CONTROL_PPR_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) static void __init free_ppr_log(struct amd_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 
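^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) /* Frees the GA log and the GA log tail-pointer page (IRQ remapping only) */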
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) static void free_ga_log(struct amd_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) #ifdef CONFIG_IRQ_REMAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 
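^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816)  * Programs the GA log base and tail registers, enables the GA log and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817)  * its interrupt, and polls the status register until the hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818)  * reports the log as running (or the loop times out).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819)  */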
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) static int iommu_ga_log_enable(struct amd_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) #ifdef CONFIG_IRQ_REMAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	u32 status, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	u64 entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	if (!iommu->ga_log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	/* Check if already running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	if (WARN_ON(status & (MMIO_STATUS_GALOG_RUN_MASK)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		    &entry, sizeof(entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		 (BIT_ULL(52)-1)) & ~7ULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		    &entry, sizeof(entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	iommu_feature_enable(iommu, CONTROL_GAINT_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	iommu_feature_enable(iommu, CONTROL_GALOG_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	for (i = 0; i < LOOP_TIMEOUT; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		if (status & (MMIO_STATUS_GALOG_RUN_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	if (WARN_ON(i >= LOOP_TIMEOUT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) #endif /* CONFIG_IRQ_REMAP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
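^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857)  * Allocates the GA log buffer and a separate page for the 8-byte GA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858)  * log tail pointer (get_order(8) still yields a full page). Only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859)  * needed when guest vAPIC mode is in use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860)  */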
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) static int iommu_init_ga_log(struct amd_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) #ifdef CONFIG_IRQ_REMAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 					get_order(GA_LOG_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	if (!iommu->ga_log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 					get_order(8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	if (!iommu->ga_log_tail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	free_ga_log(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) #endif /* CONFIG_IRQ_REMAP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 
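^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) /* Allocates the 4K page used as the completion-wait write-back (CWWB) semaphore */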
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	return iommu->cmd_sem ? 0 : -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) static void __init free_cwwb_sem(struct amd_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	if (iommu->cmd_sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		free_page((unsigned long)iommu->cmd_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) static void iommu_enable_xt(struct amd_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) #ifdef CONFIG_IRQ_REMAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	 * XT mode (32-bit APIC destination ID) requires
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	 * GA mode (128-bit IRTE support) as a prerequisite.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	    amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		iommu_feature_enable(iommu, CONTROL_XT_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) #endif /* CONFIG_IRQ_REMAP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 
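^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) /* Enables guest translation (GT) if the IOMMU reports the feature */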
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) static void iommu_enable_gt(struct amd_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	if (!iommu_feature(iommu, FEATURE_GT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	iommu_feature_enable(iommu, CONTROL_GT_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) /* sets a specific bit in the device table entry. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) static void set_dev_entry_bit(u16 devid, u8 bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	int i = (bit >> 6) & 0x03;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	int _bit = bit & 0x3f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
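^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) /* reads a specific bit from the device table entry. */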
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) static int get_dev_entry_bit(u16 devid, u8 bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	int i = (bit >> 6) & 0x03;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	int _bit = bit & 0x3f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 
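^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934)  * If translation was left enabled by a previous kernel (e.g. when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935)  * booting into a kdump kernel), copy the old device table so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936)  * attached devices keep working with their old DMA and IRQ mappings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937)  * until the new table takes over. Returns false when the old table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938)  * cannot be used safely.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939)  */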
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) static bool copy_device_table(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	struct dev_table_entry *old_devtb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	u32 lo, hi, devid, old_devtb_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	phys_addr_t old_devtb_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	struct amd_iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	u16 dom_id, dte_v, irq_v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	gfp_t gfp_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	u64 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	if (!amd_iommu_pre_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	pr_warn("Translation is already enabled - trying to copy translation structures\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	for_each_iommu(iommu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		/* All IOMMUs should use the same device table with the same size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		entry = (((u64) hi) << 32) + lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		if (last_entry && last_entry != entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 			pr_err("IOMMU:%d should use the same dev table as others!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 				iommu->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		last_entry = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		if (old_devtb_size != dev_table_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 			pr_err("The device table size of IOMMU:%d is not expected!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 				iommu->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	 * When SME is enabled in the first kernel, the entry includes the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	 * memory encryption mask (sme_me_mask); we must remove it to obtain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	 * the true physical address in the kdump kernel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	old_devtb_phys = __sme_clr(entry) & PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	if (old_devtb_phys >= 0x100000000ULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		pr_err("The address of old device table is above 4G, not trustworthy!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	old_devtb = (sme_active() && is_kdump_kernel())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		    ? (__force void *)ioremap_encrypted(old_devtb_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 							dev_table_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		    : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	if (!old_devtb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 				get_order(dev_table_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	if (old_dev_tbl_cpy == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		pr_err("Failed to allocate memory for copying old device table!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		old_dev_tbl_cpy[devid] = old_devtb[devid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		if (dte_v && dom_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 			old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 			old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 			__set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 			/* If gcr3 table existed, mask it out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 			if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 				tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 				tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 				old_dev_tbl_cpy[devid].data[1] &= ~tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 				tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 				tmp |= DTE_FLAG_GV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 				old_dev_tbl_cpy[devid].data[0] &= ~tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		int_tab_len = old_devtb[devid].data[2] & DTE_IRQ_TABLE_LEN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		if (irq_v && (int_ctl || int_tab_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 			if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 			    (int_tab_len != DTE_IRQ_TABLE_LEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 				pr_err("Wrong old irq remapping flag: %#x\n", devid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 				return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 			old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	memunmap(old_devtb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 
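^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)  * Workaround for IOMMU erratum 63: when only SYSMGT1 is set in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)  * DTE (sysmgt field == 01b), the IW (write permission) bit apparently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)  * needs to be set as well for affected devices to work correctly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)  */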
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) void amd_iommu_apply_erratum_63(u16 devid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	int sysmgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	if (sysmgt == 0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		set_dev_entry_bit(devid, DEV_ENTRY_IW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) /* Writes the specific IOMMU for a device into the rlookup table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	amd_iommu_rlookup_table[devid] = iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)  * This function takes the device specific flags read from the ACPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)  * table and sets up the device table entry with that information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 					   u16 devid, u32 flags, u32 ext_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	if (flags & ACPI_DEVFLAG_INITPASS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	if (flags & ACPI_DEVFLAG_EXTINT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	if (flags & ACPI_DEVFLAG_NMI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	if (flags & ACPI_DEVFLAG_SYSMGT1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	if (flags & ACPI_DEVFLAG_SYSMGT2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	if (flags & ACPI_DEVFLAG_LINT0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	if (flags & ACPI_DEVFLAG_LINT1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	amd_iommu_apply_erratum_63(devid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	set_iommu_for_device(iommu, devid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	struct devid_map *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	struct list_head *list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	if (type == IVHD_SPECIAL_IOAPIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		list = &ioapic_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	else if (type == IVHD_SPECIAL_HPET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		list = &hpet_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	list_for_each_entry(entry, list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		if (!(entry->id == id && entry->cmd_line))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		pr_info("Command-line override present for %s id %d - ignoring\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 			type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		*devid = entry->devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	if (!entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	entry->id	= id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	entry->devid	= *devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	entry->cmd_line	= cmd_line;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	list_add_tail(&entry->list, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 
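^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)  * Registers an ACPI HID device in acpihid_map. A command-line override
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)  * for the same hid/uid pair takes precedence; in that case only the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)  * devid is reported back to the caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)  */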
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 				      bool cmd_line)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	struct acpihid_map_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	struct list_head *list = &acpihid_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	list_for_each_entry(entry, list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		if (strcmp(entry->hid, hid) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		    (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		    !entry->cmd_line)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 		pr_info("Command-line override for hid:%s uid:%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 			hid, uid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		*devid = entry->devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	if (!entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	memcpy(entry->uid, uid, strlen(uid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	memcpy(entry->hid, hid, strlen(hid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	entry->devid = *devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	entry->cmd_line	= cmd_line;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	entry->root_devid = (entry->devid & (~0x7));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 		entry->cmd_line ? "cmd" : "ivrs",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		entry->hid, entry->uid, entry->root_devid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	list_add_tail(&entry->list, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 
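^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)  * Registers the IOAPIC, HPET and ACPI HID device-ID mappings that were
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)  * collected early, e.g. from kernel command-line overrides, before the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)  * IVHD device entries are parsed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)  */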
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) static int __init add_early_maps(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	for (i = 0; i < early_ioapic_map_size; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		ret = add_special_device(IVHD_SPECIAL_IOAPIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 					 early_ioapic_map[i].id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 					 &early_ioapic_map[i].devid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 					 early_ioapic_map[i].cmd_line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	for (i = 0; i < early_hpet_map_size; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 		ret = add_special_device(IVHD_SPECIAL_HPET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 					 early_hpet_map[i].id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 					 &early_hpet_map[i].devid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 					 early_hpet_map[i].cmd_line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	for (i = 0; i < early_acpihid_map_size; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 		ret = add_acpi_hid_device(early_acpihid_map[i].hid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 					  early_acpihid_map[i].uid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 					  &early_acpihid_map[i].devid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 					  early_acpihid_map[i].cmd_line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)  * Takes a pointer to an AMD IOMMU entry in the ACPI table and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)  * initializes the hardware and our data structures with it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 					struct ivhd_header *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	u8 *p = (u8 *)h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	u8 *end = p, flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	u16 devid = 0, devid_start = 0, devid_to = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	u32 dev_i, ext_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	bool alias = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	struct ivhd_entry *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	u32 ivhd_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	ret = add_early_maps();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	amd_iommu_apply_ivrs_quirks();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	 * First save the recommended feature enable bits from ACPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	iommu->acpi_flags = h->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	 * Done. Now parse the device entries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	ivhd_size = get_ivhd_header_size(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	if (!ivhd_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 		pr_err("Unsupported IVHD type %#x\n", h->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	p += ivhd_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	end += h->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	while (p < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		e = (struct ivhd_entry *)p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 		switch (e->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		case IVHD_DEV_ALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 			DUMP_printk("  DEV_ALL\t\t\tflags: %02x\n", e->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 			for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 				set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		case IVHD_DEV_SELECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 			DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 				    "flags: %02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 				    PCI_BUS_NUM(e->devid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 				    PCI_SLOT(e->devid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 				    PCI_FUNC(e->devid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 				    e->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 			devid = e->devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		case IVHD_DEV_SELECT_RANGE_START:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 			DUMP_printk("  DEV_SELECT_RANGE_START\t "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 				    "devid: %02x:%02x.%x flags: %02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 				    PCI_BUS_NUM(e->devid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 				    PCI_SLOT(e->devid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 				    PCI_FUNC(e->devid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 				    e->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 			devid_start = e->devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 			flags = e->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 			ext_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 			alias = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 		case IVHD_DEV_ALIAS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 			DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 				    "flags: %02x devid_to: %02x:%02x.%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 				    PCI_BUS_NUM(e->devid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 				    PCI_SLOT(e->devid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 				    PCI_FUNC(e->devid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 				    e->flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 				    PCI_BUS_NUM(e->ext >> 8),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 				    PCI_SLOT(e->ext >> 8),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 				    PCI_FUNC(e->ext >> 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 			devid = e->devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 			devid_to = e->ext >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 			set_dev_entry_from_acpi(iommu, devid   , e->flags, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 			amd_iommu_alias_table[devid] = devid_to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 		case IVHD_DEV_ALIAS_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 			DUMP_printk("  DEV_ALIAS_RANGE\t\t "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 				    "devid: %02x:%02x.%x flags: %02x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 				    "devid_to: %02x:%02x.%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 				    PCI_BUS_NUM(e->devid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 				    PCI_SLOT(e->devid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 				    PCI_FUNC(e->devid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 				    e->flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 				    PCI_BUS_NUM(e->ext >> 8),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 				    PCI_SLOT(e->ext >> 8),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 				    PCI_FUNC(e->ext >> 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 			devid_start = e->devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 			flags = e->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 			devid_to = e->ext >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 			ext_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 			alias = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		case IVHD_DEV_EXT_SELECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 			DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 				    "flags: %02x ext: %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 				    PCI_BUS_NUM(e->devid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 				    PCI_SLOT(e->devid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 				    PCI_FUNC(e->devid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 				    e->flags, e->ext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 			devid = e->devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 			set_dev_entry_from_acpi(iommu, devid, e->flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 						e->ext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		case IVHD_DEV_EXT_SELECT_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 			DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 				    "%02x:%02x.%x flags: %02x ext: %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 				    PCI_BUS_NUM(e->devid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 				    PCI_SLOT(e->devid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 				    PCI_FUNC(e->devid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 				    e->flags, e->ext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 			devid_start = e->devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 			flags = e->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 			ext_flags = e->ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 			alias = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		case IVHD_DEV_RANGE_END:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 			DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 				    PCI_BUS_NUM(e->devid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 				    PCI_SLOT(e->devid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 				    PCI_FUNC(e->devid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 			devid = e->devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 				if (alias) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 					amd_iommu_alias_table[dev_i] = devid_to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 					set_dev_entry_from_acpi(iommu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 						devid_to, flags, ext_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 				set_dev_entry_from_acpi(iommu, dev_i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 							flags, ext_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		case IVHD_DEV_SPECIAL: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 			u8 handle, type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 			const char *var;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 			u16 devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 			int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 			handle = e->ext & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 			devid  = (e->ext >>  8) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 			type   = (e->ext >> 24) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 			if (type == IVHD_SPECIAL_IOAPIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 				var = "IOAPIC";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 			else if (type == IVHD_SPECIAL_HPET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 				var = "HPET";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 				var = "UNKNOWN";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 			DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 				    var, (int)handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 				    PCI_BUS_NUM(devid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 				    PCI_SLOT(devid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 				    PCI_FUNC(devid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 			ret = add_special_device(type, handle, &devid, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 			 * add_special_device might update the devid in case a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 			 * command-line override is present. So call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 			 * set_dev_entry_from_acpi after add_special_device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 		case IVHD_DEV_ACPI_HID: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 			u16 devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 			u8 hid[ACPIHID_HID_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 			u8 uid[ACPIHID_UID_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 			int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 			if (h->type != 0x40) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 				pr_err(FW_BUG "Invalid IVHD device type %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 				       e->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 			memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 			hid[ACPIHID_HID_LEN - 1] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 			if (!(*hid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 				pr_err(FW_BUG "Invalid HID.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 			uid[0] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 			switch (e->uidf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 			case UID_NOT_PRESENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 				if (e->uidl != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 					pr_warn(FW_BUG "Invalid UID length.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 			case UID_IS_INTEGER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 				sprintf(uid, "%d", e->uid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 			case UID_IS_CHARACTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 				memcpy(uid, &e->uid, e->uidl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 				uid[e->uidl] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 			default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 			devid = e->devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 			DUMP_printk("  DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 				    hid, uid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 				    PCI_BUS_NUM(devid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 				    PCI_SLOT(devid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 				    PCI_FUNC(devid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 			flags = e->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 			ret = add_acpi_hid_device(hid, uid, &devid, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 			 * add_acpi_hid_device might update the devid in case a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 			 * command-line override is present. So call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 			 * set_dev_entry_from_acpi after add_acpi_hid_device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 		p += ivhd_entry_length(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 
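^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) /* Frees all buffers of a single IOMMU and unmaps its MMIO register space */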
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) static void __init free_iommu_one(struct amd_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	free_cwwb_sem(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	free_command_buffer(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	free_event_buffer(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	free_ppr_log(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	free_ga_log(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	iommu_unmap_mmio_space(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) static void __init free_iommu_all(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	struct amd_iommu *iommu, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	for_each_iommu_safe(iommu, next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 		list_del(&iommu->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 		free_iommu_one(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		kfree(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)  * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)  * Workaround:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)  *     BIOS should disable L2B miscellaneous clock gating by setting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)  *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	if ((boot_cpu_data.x86 != 0x15) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	    (boot_cpu_data.x86_model < 0x10) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	    (boot_cpu_data.x86_model > 0x1f))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	pci_read_config_dword(iommu->dev, 0xf4, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	if (value & BIT(2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	/* Select NB indirect register 0x90 and enable writing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	pci_info(iommu->dev, "Applying erratum 746 workaround\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	/* Clear the enable writing bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)  * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)  * Workaround:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)  *     BIOS should enable ATS write permission check by setting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)  *     L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	if ((boot_cpu_data.x86 != 0x15) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	    (boot_cpu_data.x86_model < 0x30) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	    (boot_cpu_data.x86_model > 0x3f))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	/* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	value = iommu_read_l2(iommu, 0x47);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	if (value & BIT(0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	/* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	iommu_write_l2(iommu, 0x47, value | BIT(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	pci_info(iommu->dev, "Applying ATS write check workaround\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)  * This function glues the initialization function for one IOMMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)  * together and also allocates the command buffer and programs the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)  * hardware. It does NOT enable the IOMMU. This is done afterwards.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	raw_spin_lock_init(&iommu->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	iommu->cmd_sem_val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	/* Add IOMMU to internal data structures */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	list_add_tail(&iommu->list, &amd_iommu_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	iommu->index = amd_iommus_present++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	if (unlikely(iommu->index >= MAX_IOMMUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 		WARN(1, "System has more IOMMUs than supported by this driver\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 		return -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	/* Index is fine - add IOMMU to the array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	amd_iommus[iommu->index] = iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	 * Copy data from ACPI table entry to the iommu struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	iommu->devid   = h->devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	iommu->cap_ptr = h->cap_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	iommu->pci_seg = h->pci_seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	iommu->mmio_phys = h->mmio_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	switch (h->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	case 0x10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 		/* Check if IVHD EFR contains proper max banks/counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 		if ((h->efr_attr != 0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 		    ((h->efr_attr & (0xF << 13)) != 0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 		    ((h->efr_attr & (0x3F << 17)) != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 		 * GAM also requires GA mode. Therefore, we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 		 * check cmpxchg16b support before enabling it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 		if (!boot_cpu_has(X86_FEATURE_CX16) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 		    ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	case 0x11:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	case 0x40:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 		if (h->efr_reg & (1 << 9))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 		 * XT and GAM also require GA mode. Therefore, we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 		 * check cmpxchg16b support before enabling them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 		if (!boot_cpu_has(X86_FEATURE_CX16) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 		    ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 		 * Note: Since iommu_update_intcapxt() leverages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 		 * the IOMMU MMIO access to MSI capability block registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 		 * for MSI address lo/hi/data, we need to check both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 		 * EFR[XtSup] and EFR[MsiCapMmioSup] for x2APIC support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 		if ((h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 		    (h->efr_reg & BIT(IOMMU_EFR_MSICAPMMIOSUP_SHIFT)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 			amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 		early_iommu_features_init(iommu, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 						iommu->mmio_phys_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	if (!iommu->mmio_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	if (alloc_cwwb_sem(iommu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	if (alloc_command_buffer(iommu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	if (alloc_event_buffer(iommu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	iommu->int_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	init_translation_status(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 		iommu_disable(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 		clear_translation_pre_enabled(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 		pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 			iommu->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	if (amd_iommu_pre_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 		amd_iommu_pre_enabled = translation_pre_enabled(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	ret = init_iommu_from_acpi(iommu, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	ret = amd_iommu_create_irq_domain(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	 * Make sure this IOMMU is not considered to translate itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	 * The IVRS table claims it does, but this is a lie!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	amd_iommu_rlookup_table[iommu->devid] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)  * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)  * @ivrs: Pointer to the IVRS header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)  * This function searches through all IVHDs describing the first IOMMU
 * and returns the highest supported IVHD type found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	u8 *base = (u8 *)ivrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	struct ivhd_header *ivhd = (struct ivhd_header *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 					(base + IVRS_HEADER_LENGTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	u8 last_type = ivhd->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	u16 devid = ivhd->devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 
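	/* Only IVHDs matching the first entry's devid are considered */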
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	while (((u8 *)ivhd - base < ivrs->length) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 	       (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 		u8 *p = (u8 *) ivhd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 		if (ivhd->devid == devid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 			last_type = ivhd->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 		ivhd = (struct ivhd_header *)(p + ivhd->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	return last_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)  * Iterates over all IOMMU entries in the ACPI table, allocates the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691)  * IOMMU structure and initializes it with init_iommu_one()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) static int __init init_iommu_all(struct acpi_table_header *table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	u8 *p = (u8 *)table, *end = (u8 *)table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	struct ivhd_header *h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	struct amd_iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	end += table->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	p += IVRS_HEADER_LENGTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 
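	/* The IVHD blocks start right after the fixed-length IVRS header */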
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	while (p < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 		h = (struct ivhd_header *)p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 		if (*p == amd_iommu_target_ivhd_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 				    "seg: %d flags: %01x info %04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 				    PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 				    PCI_FUNC(h->devid), h->cap_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 				    h->pci_seg, h->flags, h->info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 			DUMP_printk("       mmio-addr: %016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 				    h->mmio_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 			if (iommu == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 				return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 			ret = init_iommu_one(iommu, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 		p += h->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	WARN_ON(p != end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) static void init_iommu_perf_ctr(struct amd_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	struct pci_dev *pdev = iommu->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	if (!iommu_feature(iommu, FEATURE_PC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	amd_iommu_pc_present = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	pci_info(pdev, "IOMMU performance counters supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 
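	/*
	 * Counter configuration register: bank count in bits 17:12, per-bank
	 * counter count in bits 10:7 (positions taken from the shifts below).
	 */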
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 	val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	iommu->max_banks = (u8) ((val >> 12) & 0x3f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	iommu->max_counters = (u8) ((val >> 7) & 0xf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) static ssize_t amd_iommu_show_cap(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 				  struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 				  char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	return sprintf(buf, "%x\n", iommu->cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) static ssize_t amd_iommu_show_features(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 				       struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 				       char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	return sprintf(buf, "%llx\n", iommu->features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) static struct attribute *amd_iommu_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	&dev_attr_cap.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	&dev_attr_features.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) static struct attribute_group amd_iommu_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	.name = "amd-iommu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	.attrs = amd_iommu_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) static const struct attribute_group *amd_iommu_groups[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	&amd_iommu_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)  * Note: IVHD 0x11 and 0x40 also contain an exact copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786)  * of the IOMMU Extended Feature Register [MMIO Offset 0030h].
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)  * Default to EFR in IVHD since it is available sooner (i.e. before PCI init).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) static void __init late_iommu_features_init(struct amd_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	u64 features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	/* read extended feature bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	if (!iommu->features) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 		iommu->features = features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	 * Sanity check and warn if EFR values from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	 * IVHD and MMIO conflict.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	if (features != iommu->features)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 		pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 			features, iommu->features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) static int __init iommu_init_pci(struct amd_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	int cap_ptr = iommu->cap_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 
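	/* iommu->devid is a PCI BDF: bus in bits 15:8, devfn in bits 7:0 */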
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 						 iommu->devid & 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	if (!iommu->dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	/* Prevent binding other PCI device drivers to IOMMU devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	iommu->dev->match_driver = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 			      &iommu->cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 		amd_iommu_iotlb_sup = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	late_iommu_features_init(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	if (iommu_feature(iommu, FEATURE_GT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 		int glxval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 		u32 max_pasid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 		u64 pasmax;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 
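		/*
		 * PASmax encodes the PASID width minus one, so the largest
		 * usable PASID value is (1 << (pasmax + 1)) - 1.
		 */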
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 		pasmax = iommu->features & FEATURE_PASID_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 		pasmax >>= FEATURE_PASID_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 		max_pasid  = (1 << (pasmax + 1)) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 		amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 		BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 		glxval   = iommu->features & FEATURE_GLXVAL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 		glxval >>= FEATURE_GLXVAL_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 		if (amd_iommu_max_glx_val == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 			amd_iommu_max_glx_val = glxval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 	if (iommu_feature(iommu, FEATURE_GT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	    iommu_feature(iommu, FEATURE_PPR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 		iommu->is_iommu_v2   = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 		amd_iommu_v2_present = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	ret = iommu_init_ga_log(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 		amd_iommu_np_cache = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	init_iommu_perf_ctr(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	if (is_rd890_iommu(iommu->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 		int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 		iommu->root_pdev =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 			pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 						    PCI_DEVFN(0, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 		 * Some rd890 systems may not be fully reconfigured by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 		 * BIOS, so we need to store this information here so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 		 * it can be reprogrammed on resume.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 				&iommu->stored_addr_lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 				&iommu->stored_addr_hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 		/* Low bit locks writes to configuration space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 		iommu->stored_addr_lo &= ~1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 
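		/*
		 * Save the L1 (6 units x 0x12 regs) and L2 (0x83 regs)
		 * indirect registers so they can be restored on resume.
		 */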
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 		for (i = 0; i < 6; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 			for (j = 0; j < 0x12; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 		for (i = 0; i < 0x83; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 			iommu->stored_l2[i] = iommu_read_l2(iommu, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	amd_iommu_erratum_746_workaround(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	amd_iommu_ats_write_check_workaround(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 			       amd_iommu_groups, "ivhd%d", iommu->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	iommu_device_register(&iommu->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 	return pci_enable_device(iommu->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) static void print_iommu_info(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 	static const char * const feat_str[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 		"IA", "GA", "HE", "PC"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	struct amd_iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 	for_each_iommu(iommu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 		struct pci_dev *pdev = iommu->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 		pci_info(pdev, "Found IOMMU cap 0x%hx\n", iommu->cap_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 			pr_info("Extended features (%#llx):", iommu->features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 				if (iommu_feature(iommu, (1ULL << i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 					pr_cont(" %s", feat_str[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 			if (iommu->features & FEATURE_GAM_VAPIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 				pr_cont(" GA_vAPIC");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 			pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	if (irq_remapping_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 		pr_info("Interrupt remapping enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 		if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 			pr_info("Virtual APIC enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 		if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 			pr_info("X2APIC enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) static int __init amd_iommu_init_pci(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	struct amd_iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	for_each_iommu(iommu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 		ret = iommu_init_pci(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 		/* Need to set up the CWWB range after PCI init */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 		iommu_set_cwwb_range(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	 * Order is important here to make sure any unity map requirements are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	 * fulfilled. The unity mappings are created and written to the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	 * table during the amd_iommu_init_api() call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	 * After that we call init_device_table_dma() to make sure any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 	 * uninitialized DTE will block DMA, and in the end we flush the caches
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	 * of all IOMMUs to make sure the changes to the device table are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 	 * active.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 	ret = amd_iommu_init_api();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 	init_device_table_dma();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 	for_each_iommu(iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 		iommu_flush_all_caches(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 		print_iommu_info();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) /****************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)  * The following functions initialize the MSI interrupts for all IOMMUs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990)  * in the system. It's a bit challenging because there could be multiple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)  * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)  * pci_dev.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)  ****************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) static int iommu_setup_msi(struct amd_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 	r = pci_enable_msi(iommu->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 	r = request_threaded_irq(iommu->dev->irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 				 amd_iommu_int_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 				 amd_iommu_int_thread,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 				 0, "AMD-Vi",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 				 iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 		pci_disable_msi(iommu->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	iommu->int_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 
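/*
 * These macros assemble the 64-bit IntCapXT register value: destination
 * mode in bit 2, destination bits 23:0 starting at bit 8, the vector at
 * bit 32, and destination bits 31:24 starting at bit 56.
 */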
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) #define XT_INT_DEST_MODE(x)	(((x) & 0x1ULL) << 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) #define XT_INT_DEST_LO(x)	(((x) & 0xFFFFFFULL) << 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) #define XT_INT_VEC(x)		(((x) & 0xFFULL) << 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) #define XT_INT_DEST_HI(x)	((((x) >> 24) & 0xFFULL) << 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)  * Setup the IntCapXT registers with interrupt routing information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)  * based on the PCI MSI capability block registers, accessed via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028)  * MMIO MSI address low/hi and MSI data registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) static void iommu_update_intcapxt(struct amd_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 	u32 addr_lo = readl(iommu->mmio_base + MMIO_MSI_ADDR_LO_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 	u32 addr_hi = readl(iommu->mmio_base + MMIO_MSI_ADDR_HI_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 	u32 data    = readl(iommu->mmio_base + MMIO_MSI_DATA_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 	bool dm     = (addr_lo >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	u32 dest    = ((addr_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	if (x2apic_enabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 		dest |= MSI_ADDR_EXT_DEST_ID(addr_hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	val = XT_INT_VEC(data & 0xFF) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 	      XT_INT_DEST_MODE(dm) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	      XT_INT_DEST_LO(dest) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 	      XT_INT_DEST_HI(dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	 * The current IOMMU implementation uses the same IRQ for all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 	 * three IOMMU interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	writeq(val, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 	writeq(val, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	writeq(val, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) static void _irq_notifier_notify(struct irq_affinity_notify *notify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 				 const cpumask_t *mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	struct amd_iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	for_each_iommu(iommu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 		if (iommu->dev->irq == notify->irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 			iommu_update_intcapxt(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) static void _irq_notifier_release(struct kref *ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) {
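	/* Nothing to free here; the notifier is embedded in struct amd_iommu */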
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) static int iommu_init_intcapxt(struct amd_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	struct irq_affinity_notify *notify = &iommu->intcapxt_notify;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	 * IntCapXT requires XTSup=1 and MsiCapMmioSup=1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	 * which can be inferred from amd_iommu_xt_mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	if (amd_iommu_xt_mode != IRQ_REMAP_X2APIC_MODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	 * Also, we need to set up a notifier to update the IntCapXT registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	 * whenever the irq affinity is changed from user-space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	notify->irq = iommu->dev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	notify->notify = _irq_notifier_notify;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	notify->release = _irq_notifier_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	ret = irq_set_affinity_notifier(iommu->dev->irq, notify);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 		pr_err("Failed to register irq affinity notifier (devid=%#x, irq %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 		       iommu->devid, iommu->dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	iommu_update_intcapxt(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 	iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) static int iommu_init_msi(struct amd_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	if (iommu->int_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 		goto enable_faults;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	if (iommu->dev->msi_cap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 		ret = iommu_setup_msi(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 		ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) enable_faults:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	ret = iommu_init_intcapxt(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 	if (iommu->ppr_log != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 		iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 	iommu_ga_log_enable(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) /****************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)  * The next functions belong to the third pass of parsing the ACPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137)  * table. In this last pass the memory mapping requirements are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138)  * gathered (like exclusion and unity mapping ranges).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140)  ****************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) static void __init free_unity_maps(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	struct unity_map_entry *entry, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 	list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 		list_del(&entry->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 		kfree(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) /* called for unity map ACPI definition */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) static int __init init_unity_map_range(struct ivmd_header *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 	struct unity_map_entry *e = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 	char *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 	e = kzalloc(sizeof(*e), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 	if (e == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	switch (m->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 		kfree(e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 	case ACPI_IVMD_TYPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 		s = "IVMD_TYPE\t\t\t";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 		e->devid_start = e->devid_end = m->devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 	case ACPI_IVMD_TYPE_ALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 		s = "IVMD_TYPE_ALL\t\t";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 		e->devid_start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 		e->devid_end = amd_iommu_last_bdf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 	case ACPI_IVMD_TYPE_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 		s = "IVMD_TYPE_RANGE\t\t";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 		e->devid_start = m->devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 		e->devid_end = m->aux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	e->address_start = PAGE_ALIGN(m->range_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
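	/*
	 * Bit 0 of the IVMD flags is the Unity bit with IR/IW above it, so
	 * shifting right by one leaves just the protection bits (this matches
	 * the IVMD_FLAG_IW/IVMD_FLAG_IR fixup below).
	 */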
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 	e->prot = m->flags >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 	 * Treat per-device exclusion ranges as r/w unity-mapped regions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 	 * since some buggy BIOSes may cause the exclusion range to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 	 * overwritten (the exclusion_start and exclusion_length members).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 	 * This happens when multiple exclusion ranges (IVMD entries) are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 	 * defined in the ACPI table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 	if (m->flags & IVMD_FLAG_EXCL_RANGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 		e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 	DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 		    " range_start: %016llx range_end: %016llx flags: %x\n", s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 		    PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 		    PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 		    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 		    e->address_start, e->address_end, m->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 	list_add_tail(&e->list, &amd_iommu_unity_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) /* iterates over all memory definitions we find in the ACPI table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) static int __init init_memory_definitions(struct acpi_table_header *table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 	u8 *p = (u8 *)table, *end = (u8 *)table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	struct ivmd_header *m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 	end += table->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 	p += IVRS_HEADER_LENGTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 	while (p < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 		m = (struct ivmd_header *)p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 		if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 			init_unity_map_range(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 		p += m->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228)  * Init the device table so that no device is allowed DMA access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) static void init_device_table_dma(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 	u32 devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 
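	/* A valid DTE with translation enabled but no page table blocks all DMA */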
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 		set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) static void __init uninit_device_table_dma(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 	u32 devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 		amd_iommu_dev_table[devid].data[0] = 0ULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 		amd_iommu_dev_table[devid].data[1] = 0ULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) static void init_device_table(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 	u32 devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 	if (!amd_iommu_irq_remap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 		set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) static void iommu_init_flags(struct amd_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 	iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 	iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 	iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 	iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 		iommu_feature_disable(iommu, CONTROL_ISOC_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 	 * make IOMMU memory accesses cache coherent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 	/* Set IOTLB invalidation timeout to 1s */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 	int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 	u32 ioc_feature_control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 	struct pci_dev *pdev = iommu->root_pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 	/* RD890 BIOSes may not have completely reconfigured the iommu */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 	if (!is_rd890_iommu(iommu->dev) || !pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 	 * First, we need to ensure that the iommu is enabled. This is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 	 * controlled by a register in the northbridge
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 	/* Select Northbridge indirect register 0x75 and enable writing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 	/* Enable the iommu */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 	if (!(ioc_feature_control & 0x1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 	/* Restore the iommu BAR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 			       iommu->stored_addr_lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 			       iommu->stored_addr_hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 	/* Restore the l1 indirect regs for each of the 6 l1s */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	for (i = 0; i < 6; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 		for (j = 0; j < 0x12; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	/* Restore the l2 indirect regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 	for (i = 0; i < 0x83; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 		iommu_write_l2(iommu, i, iommu->stored_l2[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 	/* Lock PCI setup registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 			       iommu->stored_addr_lo | 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) static void iommu_enable_ga(struct amd_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) #ifdef CONFIG_IRQ_REMAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 	switch (amd_iommu_guest_ir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 	case AMD_IOMMU_GUEST_IR_VAPIC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 		iommu_feature_enable(iommu, CONTROL_GAM_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 	case AMD_IOMMU_GUEST_IR_LEGACY_GA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 		iommu_feature_enable(iommu, CONTROL_GA_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 		iommu->irte_ops = &irte_128_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 		iommu->irte_ops = &irte_32_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) static void early_enable_iommu(struct amd_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 	iommu_disable(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 	iommu_init_flags(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 	iommu_set_device_table(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 	iommu_enable_command_buffer(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	iommu_enable_event_buffer(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 	iommu_set_exclusion_range(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 	iommu_enable_ga(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 	iommu_enable_xt(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 	iommu_enable(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 	iommu_flush_all_caches(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364)  * This function finally enables all IOMMUs found in the system after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365)  * they have been initialized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367)  * Or, if this is a kdump kernel and the IOMMUs are all pre-enabled, try
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368)  * to copy the old content of the device table entries. If that is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369)  * the case, or the copy failed, just continue as a normal kernel does.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) static void early_enable_iommus(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 	struct amd_iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 	if (!copy_device_table()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 		 * If we get here because copying the device table from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 		 * old kernel (with all IOMMUs enabled) failed, print an error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 		 * message and try to free the allocated old_dev_tbl_cpy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 		if (amd_iommu_pre_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 			pr_err("Failed to copy DEV table from previous kernel.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 		if (old_dev_tbl_cpy != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 			free_pages((unsigned long)old_dev_tbl_cpy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 					get_order(dev_table_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 		for_each_iommu(iommu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 			clear_translation_pre_enabled(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 			early_enable_iommu(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 		pr_info("Copied DEV table from previous kernel.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 		free_pages((unsigned long)amd_iommu_dev_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 				get_order(dev_table_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 		amd_iommu_dev_table = old_dev_tbl_cpy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 		for_each_iommu(iommu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 			iommu_disable_command_buffer(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 			iommu_disable_event_buffer(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 			iommu_enable_command_buffer(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 			iommu_enable_event_buffer(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 			iommu_enable_ga(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 			iommu_enable_xt(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 			iommu_set_device_table(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 			iommu_flush_all_caches(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) #ifdef CONFIG_IRQ_REMAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 	 * Note: We have already checked GASup from IVRS table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 	 *       Now, we need to make sure that GAMSup is set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 	    !check_feature_on_all_iommus(FEATURE_GAM_VAPIC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 		amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) static void enable_iommus_v2(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 	struct amd_iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 	for_each_iommu(iommu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 		iommu_enable_ppr_log(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 		iommu_enable_gt(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) static void enable_iommus(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 	early_enable_iommus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 	enable_iommus_v2();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) static void disable_iommus(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 	struct amd_iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 	for_each_iommu(iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 		iommu_disable(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) #ifdef CONFIG_IRQ_REMAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 		amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454)  * Suspend/Resume support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) static void amd_iommu_resume(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 	struct amd_iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 	for_each_iommu(iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 		iommu_apply_resume_quirks(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 	/* re-load the hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 	enable_iommus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 	amd_iommu_enable_interrupts();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) static int amd_iommu_suspend(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 	/* disable IOMMUs to go out of the way for BIOS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 	disable_iommus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) static struct syscore_ops amd_iommu_syscore_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 	.suspend = amd_iommu_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 	.resume = amd_iommu_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) static void __init free_iommu_resources(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 	kmemleak_free(irq_lookup_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 	free_pages((unsigned long)irq_lookup_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 		   get_order(rlookup_table_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 	irq_lookup_table = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 	kmem_cache_destroy(amd_iommu_irq_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 	amd_iommu_irq_cache = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 	free_pages((unsigned long)amd_iommu_rlookup_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 		   get_order(rlookup_table_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 	amd_iommu_rlookup_table = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 	free_pages((unsigned long)amd_iommu_alias_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 		   get_order(alias_table_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 	amd_iommu_alias_table = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 	free_pages((unsigned long)amd_iommu_dev_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 		   get_order(dev_table_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 	amd_iommu_dev_table = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 	free_iommu_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) /* SB IOAPIC is always on this device in AMD systems */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) #define IOAPIC_SB_DEVID		((0x00 << 8) | PCI_DEVFN(0x14, 0))
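/*
 * A worked encoding, for illustration: bus 0x00, slot 0x14, function 0
 * packs as PCI_DEVFN(0x14, 0) = (0x14 << 3) | 0 = 0xa0, so
 * IOAPIC_SB_DEVID == 0x00a0 (typically the FCH/SMBus device on AMD
 * platforms).
 */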
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) static bool __init check_ioapic_information(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 	const char *fw_bug = FW_BUG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 	bool ret, has_sb_ioapic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 	int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 	has_sb_ioapic = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 	ret           = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 	 * If we have map overrides on the kernel command line the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 	 * messages in this function might not describe firmware bugs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 	 * anymore - so be careful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 	if (cmdline_maps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 		fw_bug = "";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 	for (idx = 0; idx < nr_ioapics; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 		int devid, id = mpc_ioapic_id(idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 		devid = get_ioapic_devid(id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 		if (devid < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 			pr_err("%s: IOAPIC[%d] not in IVRS table\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 				fw_bug, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 			ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 		} else if (devid == IOAPIC_SB_DEVID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 			has_sb_ioapic = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 			ret           = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 	if (!has_sb_ioapic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 		 * We expect the SB IOAPIC to be listed in the IVRS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 		 * table. The system timer is connected to the SB IOAPIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 		 * and if we don't have it in the list the system will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 		 * panic at boot time.  This situation usually happens
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 		 * when the BIOS is buggy and provides us with the wrong
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 		 * device id for the IOAPIC in the system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 		pr_err("%s: No southbridge IOAPIC found\n", fw_bug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 		pr_err("Disabling interrupt remapping\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) static void __init free_dma_resources(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 		   get_order(MAX_DOMAIN_ID/8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 	amd_iommu_pd_alloc_bitmap = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 	free_unity_maps();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 
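/*
 * Cache the global IVinfo field of the IVRS header; IOMMU_IVINFO_OFFSET
 * is its byte offset within the table (36, assuming the driver's constant
 * matches the AMD IOMMU specification).
 */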
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) static void __init ivinfo_init(void *ivrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 	amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576)  * This is the hardware init function for AMD IOMMU in the system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577)  * This function is called either from amd_iommu_init or from the interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578)  * remapping setup code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580)  * This function basically parses the ACPI table for AMD IOMMU (IVRS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581)  * four times:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583)  *	1st pass) Discover the most comprehensive IVHD type to use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585)  *	2nd pass) Find the highest PCI device id the driver has to handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586)  *		Based on this information, the sizes of the data structures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587)  *		that need to be allocated are determined.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589)  *	3rd pass) Initialize the data structures just allocated with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590)  *		information in the ACPI table about the available AMD IOMMUs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591)  *		in the system. It also maps the PCI devices in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592)  *		system to specific IOMMUs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594)  *	4th pass) After the basic data structures are allocated and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595)  *		initialized, we update them with information about memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596)  *		remapping requirements parsed out of the ACPI table in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597)  *		this last pass.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599)  * After everything is set up the IOMMUs are enabled and the necessary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600)  * hotplug and suspend notifiers are registered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) static int __init early_amd_iommu_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 	struct acpi_table_header *ivrs_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 	acpi_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 	int i, remap_cache_sz, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 	u32 pci_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 	if (!amd_iommu_detected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 	status = acpi_get_table("IVRS", 0, &ivrs_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 	if (status == AE_NOT_FOUND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 	else if (ACPI_FAILURE(status)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 		const char *err = acpi_format_exception(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 		pr_err("IVRS table error: %s\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 	 * Validate checksum here so we don't need to do it when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 	 * we actually parse the table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 	ret = check_ivrs_checksum(ivrs_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 	ivinfo_init(ivrs_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 	amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 	DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 	 * First parse the ACPI tables to find the largest Bus/Dev/Func
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 	 * we need to handle. Based on this information, the shared data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 	 * structures for the IOMMUs in the system will be allocated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 	ret = find_last_devid_acpi(ivrs_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
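	/*
	 * A rough worked example, assuming tbl_size() rounds
	 * (amd_iommu_last_bdf + 1) * entry_size up to a power-of-two number
	 * of pages: with last_bdf = 0xffff and 32-byte device table entries,
	 * dev_table_size comes out at 65536 * 32 = 2 MiB.
	 */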
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 	/* Device table - directly used by all IOMMUs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 	ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 	amd_iommu_dev_table = (void *)__get_free_pages(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 				      GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 				      get_order(dev_table_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 	if (amd_iommu_dev_table == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 	 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 	 * IOMMUs see for that device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 			get_order(alias_table_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 	if (amd_iommu_alias_table == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 	/* IOMMU rlookup table - find the IOMMU for a specific device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 	amd_iommu_rlookup_table = (void *)__get_free_pages(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 			GFP_KERNEL | __GFP_ZERO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 			get_order(rlookup_table_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 	if (amd_iommu_rlookup_table == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 					    GFP_KERNEL | __GFP_ZERO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 					    get_order(MAX_DOMAIN_ID/8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 	if (amd_iommu_pd_alloc_bitmap == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 	 * let all alias entries point to themselves
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 	for (i = 0; i <= amd_iommu_last_bdf; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 		amd_iommu_alias_table[i] = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 	 * never allocate domain 0 because it's used as the non-allocated and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 	 * error value placeholder
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 	__set_bit(0, amd_iommu_pd_alloc_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 	 * now that the data structures are allocated and basically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 	 * initialized, start the real ACPI table scan
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 	ret = init_iommu_all(ivrs_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 	/* Disable IOMMU if there's Stoney Ridge graphics */
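	/*
	 * Config dword 0 holds the vendor ID in the low 16 bits (0x1002 =
	 * AMD/ATI) and the device ID in the high 16 bits (0x98e4 = Stoney
	 * Ridge graphics).
	 */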
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 	for (i = 0; i < 32; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 		pci_id = read_pci_config(0, i, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 		if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 			pr_info("Disable IOMMU on Stoney Ridge\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 			amd_iommu_disabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 	/* Disable any previously enabled IOMMUs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 	if (!is_kdump_kernel() || amd_iommu_disabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 		disable_iommus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 	if (amd_iommu_irq_remap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 		amd_iommu_irq_remap = check_ioapic_information();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 	if (amd_iommu_irq_remap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 		 * Interrupt remapping enabled, create kmem_cache for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 		 * remapping tables.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 		ret = -ENOMEM;
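		/*
		 * Legacy interrupt remapping uses 32-bit IRTEs; the GA and
		 * vAPIC modes use the 128-bit IRTE format instead, hence the
		 * two cache entry sizes below.
		 */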
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 		if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 			remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 			remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 		amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 							remap_cache_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 							IRQ_TABLE_ALIGNMENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 							0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 		if (!amd_iommu_irq_cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 		irq_lookup_table = (void *)__get_free_pages(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 				GFP_KERNEL | __GFP_ZERO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 				get_order(rlookup_table_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 		kmemleak_alloc(irq_lookup_table, rlookup_table_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 			       1, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 		if (!irq_lookup_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 	ret = init_memory_definitions(ivrs_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 	/* init the device table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 	init_device_table();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 	/* Don't leak any ACPI memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 	acpi_put_table(ivrs_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 	ivrs_base = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) static int amd_iommu_enable_interrupts(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 	struct amd_iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 	for_each_iommu(iommu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 		ret = iommu_init_msi(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) static bool detect_ivrs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 	struct acpi_table_header *ivrs_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 	acpi_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 	status = acpi_get_table("IVRS", 0, &ivrs_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 	if (status == AE_NOT_FOUND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 	else if (ACPI_FAILURE(status)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 		const char *err = acpi_format_exception(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 		pr_err("IVRS table error: %s\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 	acpi_put_table(ivrs_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 	/* Make sure ACS will be enabled during PCI probe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 	pci_request_acs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) /****************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794)  * AMD IOMMU Initialization State Machine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796)  ****************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 
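/*
 * The normal, error-free path through the state machine, as driven by
 * iommu_go_to_state() below:
 *
 *   IOMMU_START_STATE -> IOMMU_IVRS_DETECTED -> IOMMU_ACPI_FINISHED ->
 *   IOMMU_ENABLED -> IOMMU_PCI_INIT -> IOMMU_INTERRUPTS_EN ->
 *   IOMMU_DMA_OPS -> IOMMU_INITIALIZED
 *
 * Any failure diverts to IOMMU_NOT_FOUND, IOMMU_INIT_ERROR or
 * IOMMU_CMDLINE_DISABLED, where the machine stops.
 */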
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) static int __init state_next(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 	switch (init_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 	case IOMMU_START_STATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 		if (!detect_ivrs()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 			init_state	= IOMMU_NOT_FOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 			ret		= -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 			init_state	= IOMMU_IVRS_DETECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 	case IOMMU_IVRS_DETECTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 		ret = early_amd_iommu_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 		if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 			pr_info("AMD IOMMU disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 			init_state = IOMMU_CMDLINE_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 	case IOMMU_ACPI_FINISHED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 		early_enable_iommus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 		x86_platform.iommu_shutdown = disable_iommus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 		init_state = IOMMU_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 	case IOMMU_ENABLED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 		register_syscore_ops(&amd_iommu_syscore_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 		ret = amd_iommu_init_pci();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 		enable_iommus_v2();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 	case IOMMU_PCI_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 		ret = amd_iommu_enable_interrupts();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 	case IOMMU_INTERRUPTS_EN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 		ret = amd_iommu_init_dma_ops();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 	case IOMMU_DMA_OPS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 		init_state = IOMMU_INITIALIZED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 	case IOMMU_INITIALIZED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) 		/* Nothing to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 	case IOMMU_NOT_FOUND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 	case IOMMU_INIT_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 	case IOMMU_CMDLINE_DISABLED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 		/* Error states => do nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 		/* Unknown state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 		free_dma_resources();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 		if (!irq_remapping_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 			disable_iommus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 			free_iommu_resources();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 			struct amd_iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 			uninit_device_table_dma();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 			for_each_iommu(iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 				iommu_flush_all_caches(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) static int __init iommu_go_to_state(enum iommu_init_state state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 	int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 	while (init_state != state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 		if (init_state == IOMMU_NOT_FOUND         ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 		    init_state == IOMMU_INIT_ERROR        ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 		    init_state == IOMMU_CMDLINE_DISABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 		ret = state_next();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) #ifdef CONFIG_IRQ_REMAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) int __init amd_iommu_prepare(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 	amd_iommu_irq_remap = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 	ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 	return amd_iommu_irq_remap ? 0 : -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) int __init amd_iommu_enable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 	ret = iommu_go_to_state(IOMMU_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 	irq_remapping_enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 	return amd_iommu_xt_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) void amd_iommu_disable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 	amd_iommu_suspend();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) int amd_iommu_reenable(int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 	amd_iommu_resume();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) int __init amd_iommu_enable_faulting(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 	/* We enable MSI later when PCI is initialized */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932)  * This is the core init function for AMD IOMMU hardware in the system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933)  * This function is called from the generic x86 DMA layer initialization
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934)  * code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) static int __init amd_iommu_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 	struct amd_iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 	ret = iommu_go_to_state(IOMMU_INITIALIZED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) #ifdef CONFIG_GART_IOMMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 	if (ret && list_empty(&amd_iommu_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 		 * We failed to initialize the AMD IOMMU - try fallback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 		 * to GART if possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 		gart_iommu_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 	for_each_iommu(iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 		amd_iommu_debugfs_setup(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) static bool amd_iommu_sme_check(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 	if (!sme_active() || (boot_cpu_data.x86 != 0x17))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) 	/* For Fam17h, a specific level of support is required */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 	if (boot_cpu_data.microcode >= 0x08001205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 	if ((boot_cpu_data.microcode >= 0x08001126) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 	    (boot_cpu_data.microcode <= 0x080011ff))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 	pr_notice("IOMMU not currently supported when SME is active\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) /****************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978)  * Early detect code. This code runs at IOMMU detection time in the DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979)  * layer. It just checks whether there is an IVRS ACPI table to detect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980)  * AMD IOMMUs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982)  ****************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) int __init amd_iommu_detect(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) 	if (!amd_iommu_sme_check())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) 	ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 	amd_iommu_detected = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) 	iommu_detected = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) 	x86_init.iommu.iommu_init = amd_iommu_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) /****************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006)  * Parsing functions for the AMD IOMMU specific kernel command line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007)  * options.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009)  ****************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) static int __init parse_amd_iommu_dump(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) 	amd_iommu_dump = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) static int __init parse_amd_iommu_intr(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 	for (; *str; ++str) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) 		if (strncmp(str, "legacy", 6) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 		if (strncmp(str, "vapic", 5) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) static int __init parse_amd_iommu_options(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 	for (; *str; ++str) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) 		if (strncmp(str, "fullflush", 9) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 			amd_iommu_unmap_flush = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) 		if (strncmp(str, "off", 3) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) 			amd_iommu_disabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 		if (strncmp(str, "force_isolation", 15) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 			amd_iommu_force_isolation = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) static int __init parse_ivrs_ioapic(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 	unsigned int bus, dev, fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) 	int ret, id, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 	u16 devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) 	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) 	if (ret != 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) 		pr_err("Invalid command line: ivrs_ioapic%s\n", str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 	if (early_ioapic_map_size == EARLY_MAP_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) 		pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 			str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) 	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) 	cmdline_maps			= true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) 	i				= early_ioapic_map_size++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) 	early_ioapic_map[i].id		= id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) 	early_ioapic_map[i].devid	= devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) 	early_ioapic_map[i].cmd_line	= true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) static int __init parse_ivrs_hpet(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 	unsigned int bus, dev, fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 	int ret, id, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 	u16 devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 	if (ret != 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 		pr_err("Invalid command line: ivrs_hpet%s\n", str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) 	if (early_hpet_map_size == EARLY_MAP_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) 		pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) 			str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) 	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) 	cmdline_maps			= true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) 	i				= early_hpet_map_size++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) 	early_hpet_map[i].id		= id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) 	early_hpet_map[i].devid		= devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) 	early_hpet_map[i].cmd_line	= true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) static int __init parse_ivrs_acpihid(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 	u32 bus, dev, fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 	char *hid, *uid, *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) 	char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 	int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) 	ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 	if (ret != 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) 		pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) 	p = acpiid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) 	hid = strsep(&p, ":");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) 	uid = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 	if (!hid || !(*hid) || !uid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) 		pr_err("Invalid command line: hid or uid\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) 
	/*
	 * Reject oversized input and a full map rather than overflow the
	 * fixed-size hid/uid fields of early_acpihid_map below.
	 */
	if (strlen(hid) >= ACPIHID_HID_LEN || strlen(uid) >= ACPIHID_UID_LEN) {
		pr_err("Invalid command line: hid or uid too long\n");
		return 1;
	}

	if (early_acpihid_map_size == EARLY_MAP_SIZE) {
		pr_err("Early ACPIHID map overflow - ignoring ivrs_acpihid%s\n",
			str);
		return 1;
	}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) 	i = early_acpihid_map_size++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 	memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 	memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 	early_acpihid_map[i].devid =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) 		((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) 	early_acpihid_map[i].cmd_line	= true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) __setup("amd_iommu_dump",	parse_amd_iommu_dump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) __setup("amd_iommu=",		parse_amd_iommu_options);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) __setup("amd_iommu_intr=",	parse_amd_iommu_intr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) __setup("ivrs_ioapic",		parse_ivrs_ioapic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) __setup("ivrs_hpet",		parse_ivrs_hpet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) __setup("ivrs_acpihid",		parse_ivrs_acpihid);
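
/*
 * Example usage on the kernel command line (illustrative values only; the
 * IDs must match the actual platform, see
 * Documentation/admin-guide/kernel-parameters.txt):
 *
 *   amd_iommu=fullflush
 *   amd_iommu_intr=vapic
 *   ivrs_ioapic[10]=00:14.0
 *   ivrs_hpet[0]=00:14.0
 *   ivrs_acpihid[30:1e.0]=AMD0020:0
 */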
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) IOMMU_INIT_FINISH(amd_iommu_detect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 		  gart_iommu_hole_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 		  NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 		  NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) bool amd_iommu_v2_supported(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) 	return amd_iommu_v2_present;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) EXPORT_SYMBOL(amd_iommu_v2_supported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) struct amd_iommu *get_amd_iommu(unsigned int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 	unsigned int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 	struct amd_iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 	for_each_iommu(iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 		if (i++ == idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 			return iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) EXPORT_SYMBOL(get_amd_iommu);
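/*
 * Example (sketch): enumerating the detected IOMMUs by index. It relies
 * only on get_amd_iommu() returning NULL past the last unit; the helper
 * name and the pr_info() body are illustrative, not part of the driver.
 */
static void __maybe_unused example_list_iommus(void)
{
	struct amd_iommu *iommu;
	unsigned int idx = 0;

	while ((iommu = get_amd_iommu(idx)) != NULL) {
		pr_info("iommu %u: %u banks, %u counters\n",
			idx, iommu->max_banks, iommu->max_counters);
		idx++;
	}
}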
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) /****************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171)  * IOMMU EFR Performance Counter support. This code gives access to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172)  * performance counters advertised in the IOMMU Extended Feature Register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174)  ****************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) u8 amd_iommu_pc_get_max_banks(unsigned int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 	struct amd_iommu *iommu = get_amd_iommu(idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 	if (iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 		return iommu->max_banks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) bool amd_iommu_pc_supported(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) 	return amd_iommu_pc_present;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) EXPORT_SYMBOL(amd_iommu_pc_supported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) u8 amd_iommu_pc_get_max_counters(unsigned int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) 	struct amd_iommu *iommu = get_amd_iommu(idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) 	if (iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) 		return iommu->max_counters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 				u8 fxn, u64 *value, bool is_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) 	u32 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 	u32 max_offset_lim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) 	/* Make sure the IOMMU PC resource is available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) 	if (!amd_iommu_pc_present)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) 	/* Check for valid iommu and pc register indexing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) 	if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) 	offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);
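	/*
	 * e.g. bank=0, cntr=0, fxn=0x00 yields 0x40000, the base of the
	 * counter aperture (MMIO_CNTR_REG_OFFSET), while bank=1, cntr=2,
	 * fxn=0x08 yields (0x41 << 12) | (2 << 8) | 0x08 = 0x41208.
	 */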
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 	/* Limit the offset to the hw defined mmio region aperture */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 	max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 				(iommu->max_counters << 8) | 0x28);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 	if ((offset < MMIO_CNTR_REG_OFFSET) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 	    (offset > max_offset_lim))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) 
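	/* Counters are 48 bits wide, split across two 32-bit MMIO words. */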
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 	if (is_write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) 		u64 val = *value & GENMASK_ULL(47, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 		writel((u32)val, iommu->mmio_base + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 		writel((val >> 32), iommu->mmio_base + offset + 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) 		*value = readl(iommu->mmio_base + offset + 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) 		*value <<= 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) 		*value |= readl(iommu->mmio_base + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 		*value &= GENMASK_ULL(47, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) 	if (!iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) EXPORT_SYMBOL(amd_iommu_pc_get_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) 	if (!iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) 	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) EXPORT_SYMBOL(amd_iommu_pc_set_reg);
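/*
 * Example (sketch): exercising a counter through the two accessors above,
 * following the save/write/read-back/restore pattern the driver's own
 * init-time counter test uses. The helper name and the 0xabcd test value
 * are illustrative; bank 0, counter 0, function 0x00 address the counter
 * register itself.
 */
static int __maybe_unused example_probe_counter(void)
{
	struct amd_iommu *iommu = get_amd_iommu(0);
	u64 saved, val = 0xabcdULL, readback = 0;
	int ret;

	if (!iommu)
		return -ENODEV;

	ret = amd_iommu_pc_get_reg(iommu, 0, 0, 0, &saved);
	if (ret)
		return ret;

	ret = amd_iommu_pc_set_reg(iommu, 0, 0, 0, &val);
	if (ret)
		return ret;

	ret = amd_iommu_pc_get_reg(iommu, 0, 0, 0, &readback);
	if (ret)
		return ret;

	/* Restore the original contents before reporting the result. */
	amd_iommu_pc_set_reg(iommu, 0, 0, 0, &saved);

	return readback == val ? 0 : -EIO;
}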