/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#ifndef AMD_IOMMU_H
#define AMD_IOMMU_H

#include <linux/iommu.h>

#include "amd_iommu_types.h"

extern int amd_iommu_get_num_iommus(void);
extern int amd_iommu_init_dma_ops(void);
extern int amd_iommu_init_passthrough(void);
extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
extern void amd_iommu_apply_erratum_63(u16 devid);
extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
extern int amd_iommu_init_devices(void);
extern void amd_iommu_uninit_devices(void);
extern void amd_iommu_init_notifier(void);
extern int amd_iommu_init_api(void);
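
/*
 * Usage sketch (illustrative, not part of this header): the hard and
 * threaded IRQ halves above are meant to be wired together with
 * request_threaded_irq(), roughly as the driver's init code does.
 * The "iommu" pointer and the "AMD-Vi" name are placeholders here.
 *
 *	ret = request_threaded_irq(iommu->dev->irq, amd_iommu_int_handler,
 *				   amd_iommu_int_thread, 0, "AMD-Vi", iommu);
 *	if (ret)
 *		pr_err("AMD-Vi: failed to register IRQ handler\n");
 */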

#ifdef CONFIG_AMD_IOMMU_DEBUGFS
void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
#else
static inline void amd_iommu_debugfs_setup(struct amd_iommu *iommu) {}
#endif

/* Needed for interrupt remapping */
extern int amd_iommu_prepare(void);
extern int amd_iommu_enable(void);
extern void amd_iommu_disable(void);
extern int amd_iommu_reenable(int mode);
extern int amd_iommu_enable_faulting(void);
extern int amd_iommu_guest_ir;
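
/*
 * Call-order sketch (an assumption drawn from how the x86 interrupt
 * remapping layer consumes such hooks, not a contract stated here):
 * amd_iommu_prepare() runs during remapping detection,
 * amd_iommu_enable() once remapping is committed, and
 * amd_iommu_reenable(mode) on resume from suspend.
 *
 *	if (amd_iommu_prepare())
 *		return -ENODEV;
 *	...
 *	if (amd_iommu_enable())
 *		return -ENODEV;
 */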

/* IOMMUv2 specific functions */
struct iommu_domain;

extern bool amd_iommu_v2_supported(void);
extern struct amd_iommu *get_amd_iommu(unsigned int idx);
extern u8 amd_iommu_pc_get_max_banks(unsigned int idx);
extern bool amd_iommu_pc_supported(void);
extern u8 amd_iommu_pc_get_max_counters(unsigned int idx);
extern int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
				u8 fxn, u64 *value);
extern int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
				u8 fxn, u64 *value);
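
/*
 * Perf-counter sketch (illustrative; the bank/counter loop and the
 * function code 0 for the counter-value register are assumptions
 * modelled on the perf-events consumer of this API):
 *
 *	struct amd_iommu *iommu = get_amd_iommu(0);
 *	u8 bank, cntr;
 *	u64 value;
 *
 *	if (!amd_iommu_pc_supported())
 *		return;
 *	for (bank = 0; bank < amd_iommu_pc_get_max_banks(0); bank++)
 *		for (cntr = 0; cntr < amd_iommu_pc_get_max_counters(0); cntr++)
 *			if (!amd_iommu_pc_get_reg(iommu, bank, cntr, 0, &value))
 *				pr_debug("bank %u cntr %u: %llu\n",
 *					 bank, cntr, value);
 */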

extern int amd_iommu_register_ppr_notifier(struct notifier_block *nb);
extern int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb);
extern void amd_iommu_domain_direct_map(struct iommu_domain *dom);
extern int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids);
extern int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
				u64 address);
extern int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid);
extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
				     unsigned long cr3);
extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid);
extern struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev);
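
/*
 * PASID setup sketch (illustrative; "dom", "pasids", "pasid" and
 * "cr3" are placeholders and error handling is abbreviated): a v2
 * consumer first sizes the PASID table, then installs a guest CR3
 * for one PASID, and flushes that PASID's TLB entries after the
 * tables change.
 *
 *	ret = amd_iommu_domain_enable_v2(dom, pasids);
 *	if (!ret)
 *		ret = amd_iommu_domain_set_gcr3(dom, pasid, cr3);
 *	if (!ret)
 *		amd_iommu_flush_tlb(dom, pasid);
 */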

#ifdef CONFIG_IRQ_REMAP
extern int amd_iommu_create_irq_domain(struct amd_iommu *iommu);
#else
static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
{
	return 0;
}
#endif

#define PPR_SUCCESS			0x0
#define PPR_INVALID			0x1
#define PPR_FAILURE			0xf

extern int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
				  int status, int tag);
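
/*
 * PPR completion sketch (illustrative; "fault" is a placeholder for
 * the serviced page-request): report the outcome back to the device
 * using the tag of the original request. PPR_INVALID marks a request
 * the handler rejected, PPR_FAILURE an internal error.
 *
 *	status = handled ? PPR_SUCCESS : PPR_INVALID;
 *	amd_iommu_complete_ppr(pdev, fault->pasid, status, fault->tag);
 */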

/* The RD890 north bridge IOMMU needs chipset-specific workarounds */
static inline bool is_rd890_iommu(struct pci_dev *pdev)
{
	return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
	       (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
}

/* Test a bit from the IOMMU's extended feature register */
static inline bool iommu_feature(struct amd_iommu *iommu, u64 mask)
{
	return !!(iommu->features & mask);
}
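
/*
 * Feature-check sketch (illustrative; FEATURE_GT is one of the
 * extended-feature masks defined in amd_iommu_types.h): guest
 * translation must be present before the v2/PASID paths above can
 * be used.
 *
 *	if (!iommu_feature(iommu, FEATURE_GT))
 *		return -EINVAL;
 */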

/*
 * With AMD memory encryption (SME) active, addresses handed to the
 * IOMMU hardware must carry the encryption bit, and must have it
 * cleared again before the CPU dereferences them.
 */
static inline u64 iommu_virt_to_phys(void *vaddr)
{
	return (u64)__sme_set(virt_to_phys(vaddr));
}

static inline void *iommu_phys_to_virt(unsigned long paddr)
{
	return phys_to_virt(__sme_clr(paddr));
}
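
/*
 * Round-trip sketch (illustrative; "pt_root" is a placeholder for a
 * kernel-direct-mapped page-table pointer): the two helpers invert
 * each other, with the SME C-bit set only on the device-visible side.
 *
 *	u64 root = iommu_virt_to_phys(pt_root);
 *	WARN_ON(iommu_phys_to_virt((unsigned long)root) != pt_root);
 */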

extern bool translation_pre_enabled(struct amd_iommu *iommu);
extern bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
					 struct device *dev);
extern int __init add_special_device(u8 type, u8 id, u16 *devid,
				     bool cmd_line);

#ifdef CONFIG_DMI
void amd_iommu_apply_ivrs_quirks(void);
#else
static inline void amd_iommu_apply_ivrs_quirks(void) { }
#endif

#endif /* AMD_IOMMU_H */